io_uring: refactor io_req_defer()
Rename io_req_defer() into io_drain_req() and refactor it, uncoupling it from io_queue_sqe() error handling and preparing for coming optimisations. Also, prioritise the non-IOSQE_ASYNC path.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/4f17dd56e7fbe52d1866f8acd8efe3284d2bebcb.1623709150.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 0499e582aa
commit 76cc33d791
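The caller-side effect is the easiest way to read this change: io_drain_req() now reports via a bool whether it consumed the request (deferred it or failed it), so io_queue_sqe() can bail out early and then try the common, non-IOSQE_ASYNC path first. Below is a minimal userspace sketch of that dispatch order; the struct and stub helpers are invented stand-ins for illustration, not the kernel implementation, and only the branch structure mirrors the hunks that follow.

/* Hypothetical stand-ins modelling the patched flow; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define REQ_F_FORCE_ASYNC (1U << 0)              /* assumed flag bit for the sketch */

struct sketch_req { unsigned int flags; };

/* Mirrors io_drain_req(): true means the request was deferred or failed here. */
static bool sketch_drain_req(struct sketch_req *req) { (void)req; return false; }

static int  sketch_prep_async(struct sketch_req *req) { (void)req; return 0; }
static void sketch_issue_inline(struct sketch_req *req) { (void)req; puts("issued inline"); }
static void sketch_queue_async(struct sketch_req *req) { (void)req; puts("punted to worker"); }
static void sketch_fail(struct sketch_req *req, int err) { (void)req; printf("failed: %d\n", err); }

static void sketch_queue_sqe(struct sketch_req *req)
{
        if (sketch_drain_req(req))              /* drain handling is fully decoupled */
                return;

        if (!(req->flags & REQ_F_FORCE_ASYNC)) {
                sketch_issue_inline(req);       /* common path, tried first */
        } else {
                int ret = sketch_prep_async(req);

                if (ret)
                        sketch_fail(req, ret);
                else
                        sketch_queue_async(req);
        }
}

int main(void)
{
        struct sketch_req req = { .flags = 0 };

        sketch_queue_sqe(&req);                 /* prints "issued inline" */
        return 0;
}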
@@ -5998,7 +5998,7 @@ static u32 io_get_sequence(struct io_kiocb *req)
         return ctx->cached_sq_head - nr_reqs;
 }
 
-static int io_req_defer(struct io_kiocb *req)
+static bool io_drain_req(struct io_kiocb *req)
 {
         struct io_ring_ctx *ctx = req->ctx;
         struct io_defer_entry *de;
@@ -6008,27 +6008,29 @@ static int io_req_defer(struct io_kiocb *req)
         /* Still need defer if there is pending req in defer list. */
         if (likely(list_empty_careful(&ctx->defer_list) &&
                 !(req->flags & REQ_F_IO_DRAIN)))
-                return 0;
+                return false;
 
         seq = io_get_sequence(req);
         /* Still a chance to pass the sequence check */
         if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
-                return 0;
+                return false;
 
         ret = io_req_prep_async(req);
         if (ret)
                 return ret;
         io_prep_async_link(req);
         de = kmalloc(sizeof(*de), GFP_KERNEL);
-        if (!de)
-                return -ENOMEM;
+        if (!de) {
+                io_req_complete_failed(req, ret);
+                return true;
+        }
 
         spin_lock_irq(&ctx->completion_lock);
         if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
                 spin_unlock_irq(&ctx->completion_lock);
                 kfree(de);
                 io_queue_async_work(req);
-                return -EIOCBQUEUED;
+                return true;
         }
 
         trace_io_uring_defer(ctx, req, req->user_data);
@@ -6036,7 +6038,7 @@ static int io_req_defer(struct io_kiocb *req)
         de->seq = seq;
         list_add_tail(&de->list, &ctx->defer_list);
         spin_unlock_irq(&ctx->completion_lock);
-        return -EIOCBQUEUED;
+        return true;
 }
 
 static void io_clean_op(struct io_kiocb *req)
@@ -6447,21 +6449,18 @@ static void __io_queue_sqe(struct io_kiocb *req)
 
 static void io_queue_sqe(struct io_kiocb *req)
 {
-        int ret;
+        if (io_drain_req(req))
+                return;
 
-        ret = io_req_defer(req);
-        if (ret) {
-                if (ret != -EIOCBQUEUED) {
-fail_req:
-                        io_req_complete_failed(req, ret);
-                }
-        } else if (req->flags & REQ_F_FORCE_ASYNC) {
-                ret = io_req_prep_async(req);
-                if (unlikely(ret))
-                        goto fail_req;
-                io_queue_async_work(req);
-        } else {
+        if (likely(!(req->flags & REQ_F_FORCE_ASYNC))) {
                 __io_queue_sqe(req);
+        } else {
+                int ret = io_req_prep_async(req);
+
+                if (unlikely(ret))
+                        io_req_complete_failed(req, ret);
+                else
+                        io_queue_async_work(req);
         }
 }
 