io_uring: revise completion_lock locking
io_kill_timeouts() doesn't post any events but queues everything to
task_work. Locking there is only needed to protect the traversal of
linked requests, so we should grab completion_lock directly instead of
using the io_cq_[un]lock helpers. The same goes for
__io_req_find_next_prep().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/88e75d481a65dc295cb59722bb1cf76402d1c06b.1670002973.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 6971253f07
parent ea011ee102
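For context on why a plain spin lock is enough on these paths: the cq-lock helpers pair completion_lock with CQE-posting work. A rough sketch of what the unlock-post side does, paraphrased from the io_uring code of this period (the exact body in the tree may differ slightly):

/*
 * Unlock-and-post: besides dropping completion_lock, publish the new
 * CQ ring tail and wake anyone waiting on completions. All of that is
 * wasted work on paths that post no CQEs, e.g. io_kill_timeouts(),
 * which defers every completion to task_work.
 */
void io_cq_unlock_post(struct io_ring_ctx *ctx)
	__releases(ctx->completion_lock)
{
	io_commit_cqring(ctx);			/* publish new CQ tail */
	spin_unlock(&ctx->completion_lock);
	io_cqring_wake(ctx);			/* wake CQ waiters */
}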
io_uring/io_uring.c
@@ -597,6 +597,18 @@ static inline void __io_cq_unlock(struct io_ring_ctx *ctx)
 	spin_unlock(&ctx->completion_lock);
 }
 
+static inline void io_cq_lock(struct io_ring_ctx *ctx)
+	__acquires(ctx->completion_lock)
+{
+	spin_lock(&ctx->completion_lock);
+}
+
+static inline void io_cq_unlock(struct io_ring_ctx *ctx)
+	__releases(ctx->completion_lock)
+{
+	spin_unlock(&ctx->completion_lock);
+}
+
 /* keep it inlined for io_submit_flush_completions() */
 static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
 	__releases(ctx->completion_lock)
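Taken together with the io_uring.h hunk further down, this moves io_cq_lock()/io_cq_unlock() out of the shared header and makes them local to io_uring.c; io_cq_unlock() also picks up the __releases() annotation it previously lacked.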
@@ -1074,9 +1086,9 @@ static void __io_req_find_next_prep(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
-	io_cq_lock(ctx);
+	spin_lock(&ctx->completion_lock);
 	io_disarm_next(req);
-	io_cq_unlock_post(ctx);
+	spin_unlock(&ctx->completion_lock);
 }
 
 static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
io_uring/io_uring.h
@@ -87,17 +87,6 @@ static inline void io_req_task_work_add(struct io_kiocb *req)
 #define io_for_each_link(pos, head) \
 	for (pos = (head); pos; pos = pos->link)
 
-static inline void io_cq_lock(struct io_ring_ctx *ctx)
-	__acquires(ctx->completion_lock)
-{
-	spin_lock(&ctx->completion_lock);
-}
-
-static inline void io_cq_unlock(struct io_ring_ctx *ctx)
-{
-	spin_unlock(&ctx->completion_lock);
-}
-
 void io_cq_unlock_post(struct io_ring_ctx *ctx);
 
 static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
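The "protecting linked requests traversing" from the commit message refers to walks over req->link chains, such as those done with the io_for_each_link() macro kept in this header. A hypothetical illustration of the pattern (the helper name and the match condition are invented for this sketch; io_match_task(), mentioned in the new timeout.c comment below, performs a similar walk):

/*
 * Hypothetical sketch: a link chain may be rewritten concurrently
 * (e.g. by cancellation), so traverse it under completion_lock.
 */
static bool io_link_chain_has_task(struct io_ring_ctx *ctx,
				   struct io_kiocb *head,
				   struct task_struct *task)
{
	struct io_kiocb *pos;
	bool found = false;

	spin_lock(&ctx->completion_lock);
	io_for_each_link(pos, head) {
		if (pos->task == task) {	/* invented condition */
			found = true;
			break;
		}
	}
	spin_unlock(&ctx->completion_lock);
	return found;
}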
io_uring/timeout.c
@@ -624,7 +624,11 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
 	struct io_timeout *timeout, *tmp;
 	int canceled = 0;
 
-	io_cq_lock(ctx);
+	/*
+	 * completion_lock is needed for io_match_task(). Take it before
+	 * timeout_lock to keep locking ordering.
+	 */
+	spin_lock(&ctx->completion_lock);
 	spin_lock_irq(&ctx->timeout_lock);
 	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
 		struct io_kiocb *req = cmd_to_io_kiocb(timeout);
@@ -634,6 +638,6 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
 		canceled++;
 	}
 	spin_unlock_irq(&ctx->timeout_lock);
-	io_cq_unlock_post(ctx);
+	spin_unlock(&ctx->completion_lock);
 	return canceled != 0;
 }
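The resulting lock nesting in io_kill_timeouts(), condensed from the two hunks above (a sketch of the combined effect, not additional code from the patch):

	spin_lock(&ctx->completion_lock);	/* outer, non-irq */
	spin_lock_irq(&ctx->timeout_lock);	/* inner, irq-disabling */
	/* ... walk ctx->timeout_list, cancelling matching timeouts ... */
	spin_unlock_irq(&ctx->timeout_lock);
	spin_unlock(&ctx->completion_lock);	/* release in reverse order */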