io_uring: don't iterate cq wait fast path
Task work runners keep running until all queued tw items are exhausted. It's also rare for defer tw to queue normal tw and vice versa. Taking that into account, there is only a dim chance that further iterating the io_cqring_wait() fast path will get us anything, and so we can remove the loop there.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/1f9565726661266abaa5d921e97433c831759ecf.1672916894.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 0c4fe008c9
commit f36ba6cf1a
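The premise of the change is that a task-work runner keeps going until its queue is empty, so a second pass over the fast path rarely finds anything new. A minimal userspace sketch of that drain-until-empty behaviour, assuming a plain singly linked list and hypothetical names rather than the kernel's llist/task-work machinery:

#include <stdio.h>

/* Hypothetical stand-in for a queued task-work item. */
struct tw_item {
	struct tw_item *next;
	void (*fn)(struct tw_item *);
};

/* Hypothetical stand-in for a per-context work list. */
struct tw_queue {
	struct tw_item *head;
};

static void tw_queue_push(struct tw_queue *q, struct tw_item *item)
{
	item->next = q->head;
	q->head = item;
}

/*
 * Model of a task-work runner: it loops until the queue is empty,
 * so when it returns there is nothing left for the caller to find
 * on a second pass.
 */
static int tw_run_all(struct tw_queue *q)
{
	int ran = 0;

	while (q->head) {
		struct tw_item *item = q->head;

		q->head = item->next;
		item->fn(item);
		ran++;
	}
	return ran;
}

static void noop(struct tw_item *item)
{
	(void)item;
}

int main(void)
{
	struct tw_queue q = { 0 };
	struct tw_item a = { 0, noop }, b = { 0, noop };

	tw_queue_push(&q, &a);
	tw_queue_push(&q, &b);

	printf("first pass ran %d items\n", tw_run_all(&q));	/* 2 */
	printf("second pass ran %d items\n", tw_run_all(&q));	/* 0 */
	return 0;
}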
@@ -2510,18 +2510,16 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 	if (!io_allowed_run_tw(ctx))
 		return -EEXIST;
 
-	do {
-		/* always run at least 1 task work to process local work */
-		ret = io_run_task_work_ctx(ctx);
+	if (!llist_empty(&ctx->work_llist)) {
+		ret = io_run_local_work(ctx);
 		if (ret < 0)
 			return ret;
-		io_cqring_overflow_flush(ctx);
-
-		/* if user messes with these they will just get an early return */
-		if (__io_cqring_events_user(ctx) >= min_events)
-			return 0;
-	} while (ret > 0);
+	}
+	io_run_task_work();
+	io_cqring_overflow_flush(ctx);
+
+	/* if user messes with these they will just get an early return */
+	if (__io_cqring_events_user(ctx) >= min_events)
+		return 0;
 
 	if (sig) {
 #ifdef CONFIG_COMPAT
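To make the shape of the change easier to see outside the hunk, here is a simplified before/after sketch of the wait fast path. It is a userspace model under assumptions, not io_uring code: the context struct, the event counter and every helper name below are hypothetical stand-ins, and the model runner simply drains whatever is queued, as the commit message describes.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the ring context. */
struct ring_ctx {
	int pending_tw;		/* queued task-work items */
	int cq_events;		/* completions already posted to the CQ */
};

/*
 * Model runner: drains everything that is currently queued and
 * reports how much it processed. Assume each item posts one CQE.
 */
static int run_task_work(struct ring_ctx *ctx)
{
	int ran = ctx->pending_tw;

	ctx->cq_events += ran;
	ctx->pending_tw = 0;
	return ran;
}

/* Old shape: keep looping as long as the previous pass did work. */
static bool old_fastpath(struct ring_ctx *ctx, int min_events)
{
	int ret;

	do {
		ret = run_task_work(ctx);
		if (ctx->cq_events >= min_events)
			return true;
	} while (ret > 0);
	return false;
}

/*
 * New shape: a single pass. Because the runner already drained its
 * queue, iterating again would almost never find additional work.
 */
static bool new_fastpath(struct ring_ctx *ctx, int min_events)
{
	run_task_work(ctx);
	return ctx->cq_events >= min_events;
}

int main(void)
{
	struct ring_ctx a = { .pending_tw = 3, .cq_events = 0 };
	struct ring_ctx b = { .pending_tw = 3, .cq_events = 0 };

	printf("old fast path satisfied: %d\n", old_fastpath(&a, 2));
	printf("new fast path satisfied: %d\n", new_fastpath(&b, 2));
	return 0;
}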