io_uring: fix __tctx_task_work() ctx race
There is an unlikely but possible race using a freed context. That's
because req->task_work.func() can free a request, but we won't
necessarily find a completion in submit_state.comp, and so all ctx refs
may be put by the time we do mutex_lock(&ctx->uring_lock).
There are several reasons why a request can miss going through
submit_state.comp:

1) req->task_work.func() didn't complete the request itself but punted
   it to iowq (e.g. reissue) and it got freed later; or a similar
   situation where it overflowed and got flushed by someone else, or
   was submitted to IRQ completion.
2) As we don't hold the uring_lock, someone else can do
   io_submit_flush_completions() and put our ref.
3) Bugs and code obscurities, e.g. failing to propagate issue_flags
   properly.
One example is as follows:

  CPU1                              |  CPU2
=======================================================================
@req->task_work.func()              |
  -> @req overflowed, so            |
     submit_state.comp.nr == 0      |
                                    | flush overflows, and free @req
                                    | ctx refs == 0, free ctx
ctx is dead, but we still do        |
lock + flush + unlock               |
So take a ctx reference for each new ctx we see in __tctx_task_work(),
and don't release it until we have done all our flushing.
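
To make the lifetime rule concrete, here is a minimal userspace C
sketch of the pattern; it is not the kernel code itself, all names
(fake_ctx, flush_and_put(), drain()) are made up for illustration, and
a plain atomic counter stands in for percpu_ref. The worker takes a
reference as soon as it switches to a new context and drops it only
after flushing, so the context cannot be freed underneath the flush:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for io_ring_ctx: a refcount plus a count of batched,
     * not-yet-flushed completions (like submit_state.comp.nr). */
    struct fake_ctx {
            atomic_int refs;
            int pending;
    };

    static void ctx_get(struct fake_ctx *ctx)
    {
            atomic_fetch_add(&ctx->refs, 1);
    }

    static void ctx_put(struct fake_ctx *ctx)
    {
            if (atomic_fetch_sub(&ctx->refs, 1) == 1) {
                    printf("freeing ctx %p\n", (void *)ctx);
                    free(ctx);
            }
    }

    /* Same shape as ctx_flush_and_put(): flush whatever is batched,
     * then drop the ref that kept the context alive across the flush. */
    static void flush_and_put(struct fake_ctx *ctx)
    {
            if (!ctx)
                    return;
            if (ctx->pending) {
                    printf("flushing %d completions\n", ctx->pending);
                    ctx->pending = 0;
            }
            ctx_put(ctx);
    }

    struct fake_req {
            struct fake_ctx *ctx;
    };

    /* The fixed loop shape: take a ref on every context switch, and
     * only release the previous context after flushing it. */
    static void drain(struct fake_req *reqs, int nr)
    {
            struct fake_ctx *ctx = NULL;

            for (int i = 0; i < nr; i++) {
                    if (reqs[i].ctx != ctx) {
                            flush_and_put(ctx);
                            ctx = reqs[i].ctx;
                            ctx_get(ctx);
                    }
                    ctx->pending++;         /* "run" the task work */
            }
            flush_and_put(ctx);
    }

    int main(void)
    {
            struct fake_ctx *a = calloc(1, sizeof(*a));
            struct fake_ctx *b = calloc(1, sizeof(*b));
            struct fake_req reqs[] = { {a}, {a}, {b}, {a} };

            atomic_init(&a->refs, 1);       /* "owner" references */
            atomic_init(&b->refs, 1);
            drain(reqs, 4);
            ctx_put(a);
            ctx_put(b);
            return 0;
    }

Each context is flushed once per switch and freed only after the final
put, which is exactly the ordering the fix enforces for ctx->refs.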
Fixes: 65453d1efb ("io_uring: enable req cache for task_work items")
Reported-by: syzbot+a157ac7c03a56397f553@syzkaller.appspotmail.com
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
[axboe: fold in my one-liner and fix ref mismatch]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 2c32395d81
parent 0d30b3e7ee
@@ -1800,6 +1800,18 @@ static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
 	return __io_req_find_next(req);
 }
 
+static void ctx_flush_and_put(struct io_ring_ctx *ctx)
+{
+	if (!ctx)
+		return;
+	if (ctx->submit_state.comp.nr) {
+		mutex_lock(&ctx->uring_lock);
+		io_submit_flush_completions(&ctx->submit_state.comp, ctx);
+		mutex_unlock(&ctx->uring_lock);
+	}
+	percpu_ref_put(&ctx->refs);
+}
+
 static bool __tctx_task_work(struct io_uring_task *tctx)
 {
 	struct io_ring_ctx *ctx = NULL;
@@ -1817,30 +1829,20 @@ static bool __tctx_task_work(struct io_uring_task *tctx)
 	node = list.first;
 	while (node) {
 		struct io_wq_work_node *next = node->next;
-		struct io_ring_ctx *this_ctx;
 		struct io_kiocb *req;
 
 		req = container_of(node, struct io_kiocb, io_task_work.node);
-		this_ctx = req->ctx;
+		if (req->ctx != ctx) {
+			ctx_flush_and_put(ctx);
+			ctx = req->ctx;
+			percpu_ref_get(&ctx->refs);
+		}
+
 		req->task_work.func(&req->task_work);
 		node = next;
-
-		if (!ctx) {
-			ctx = this_ctx;
-		} else if (ctx != this_ctx) {
-			mutex_lock(&ctx->uring_lock);
-			io_submit_flush_completions(&ctx->submit_state.comp, ctx);
-			mutex_unlock(&ctx->uring_lock);
-			ctx = this_ctx;
-		}
 	}
 
-	if (ctx && ctx->submit_state.comp.nr) {
-		mutex_lock(&ctx->uring_lock);
-		io_submit_flush_completions(&ctx->submit_state.comp, ctx);
-		mutex_unlock(&ctx->uring_lock);
-	}
-
+	ctx_flush_and_put(ctx);
 	return list.first != NULL;
 }
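
A design note on ctx_flush_and_put() in the diff above: it tolerates a
NULL ctx, which is what lets __tctx_task_work() call it unconditionally,
both on every context switch (where ctx is still NULL for the first
request) and once more after the loop, folding the two open-coded flush
sites of the old code into one helper. The same hunk with explanatory
comments added:

    if (req->ctx != ctx) {
            ctx_flush_and_put(ctx);         /* no-op while ctx == NULL */
            ctx = req->ctx;                 /* switch to the new context */
            percpu_ref_get(&ctx->refs);     /* held until the next flush */
    }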