io_uring: ensure iopoll runs local task work as well

Combine the two checks we have, one for whether task_work needs running and
one for whether we need to shuffle the mutex, into a single check, so we
unify how task_work is run in the iopoll loop. This helps ensure that local
task_work is run when needed, and it also optimizes that path to avoid a
mutex shuffle when it isn't needed.

Signed-off-by: Jens Axboe <axboe@kernel.dk>

Author: Jens Axboe <axboe@kernel.dk>
Date:   2022-09-03 09:52:01 -06:00
commit: dac6a0eae7
parent: 8ac5d85a89

2 changed files with 23 additions and 16 deletions
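
As a rough illustration of the pattern the message describes, here is a small
stand-alone user-space model before the diff itself. It is a sketch under
invented names (struct ring, local_work, task_work, poll_ring and so on are
not io_uring identifiers), not the kernel implementation; the point is only
the shape of the loop, where a single combined pending check gates the
deferred work and the lock is dropped and re-taken only when work really has
to run outside it.

/*
 * Toy user-space model of the pattern above. Every identifier here is
 * invented for the sketch; this is not the io_uring code. One combined
 * "anything pending?" check gates the work, and the lock is only dropped
 * and re-taken (the "mutex shuffle") when work must run outside the lock.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ring {
	pthread_mutex_t lock;	/* plays the role of the ring lock */
	atomic_int local_work;	/* deferred work that may run under the lock */
	atomic_int task_work;	/* deferred work that must run with the lock dropped */
	int completions;	/* completions accounted for so far */
};

/* The combined check: is any kind of deferred work pending? */
static bool work_pending(struct ring *r)
{
	return atomic_load(&r->task_work) > 0 ||
	       atomic_load(&r->local_work) > 0;
}

/* Run whatever is pending; only shuffle the mutex if we really must. */
static void run_pending_work(struct ring *r)
{
	int done = 0;

	/* local work is drained while the lock is still held */
	while (atomic_load(&r->local_work) > 0) {
		atomic_fetch_sub(&r->local_work, 1);
		done++;
	}

	if (atomic_load(&r->task_work) > 0) {
		pthread_mutex_unlock(&r->lock);		/* the mutex shuffle */
		while (atomic_load(&r->task_work) > 0) {
			atomic_fetch_sub(&r->task_work, 1);
			done++;
		}
		pthread_mutex_lock(&r->lock);
		/* the real loop also re-checks ring state here and may break out */
	}
	r->completions += done;
}

/* Poll until at least `min` completions have been accounted for. */
static int poll_ring(struct ring *r, int min)
{
	pthread_mutex_lock(&r->lock);
	while (r->completions < min) {
		if (work_pending(r))	/* one gate instead of two */
			run_pending_work(r);
		/* ...the io_do_iopoll() step of the real loop would go here... */
	}
	pthread_mutex_unlock(&r->lock);
	return r->completions;
}

int main(void)
{
	static struct ring r = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.local_work = 2,
		.task_work = 1,
	};

	/* 2 + 1 pre-queued deferred items satisfy a wait for 3 completions */
	printf("completions: %d\n", poll_ring(&r, 3));
	return 0;
}

Mapping the sketch onto the diffs below: local_work plays the role of
ctx->work_llist, which __io_run_local_work() drains while uring_lock is still
held; task_work corresponds to the task's regular task_work, which
io_run_task_work() runs only after the lock has been dropped; and
work_pending() corresponds to the io_task_work_pending() helper added to
io_uring.h.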

io_uring/io_uring.c

@@ -1428,25 +1428,26 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
                  * forever, while the workqueue is stuck trying to acquire the
                  * very same mutex.
                  */
-                if (wq_list_empty(&ctx->iopoll_list)) {
-                        u32 tail = ctx->cached_cq_tail;
+                if (wq_list_empty(&ctx->iopoll_list) ||
+                    io_task_work_pending(ctx)) {
+                        if (!llist_empty(&ctx->work_llist))
+                                __io_run_local_work(ctx, true);
+                        if (task_work_pending(current) ||
+                            wq_list_empty(&ctx->iopoll_list)) {
+                                u32 tail = ctx->cached_cq_tail;

-                        mutex_unlock(&ctx->uring_lock);
-                        ret = io_run_task_work_ctx(ctx);
-                        mutex_lock(&ctx->uring_lock);
-                        if (ret < 0)
-                                break;
+                                mutex_unlock(&ctx->uring_lock);
+                                ret = io_run_task_work();
+                                mutex_lock(&ctx->uring_lock);

-                        /* some requests don't go through iopoll_list */
-                        if (tail != ctx->cached_cq_tail ||
-                            wq_list_empty(&ctx->iopoll_list))
-                                break;
-                }
+                                if (ret < 0)
+                                        break;

-                if (task_work_pending(current)) {
-                        mutex_unlock(&ctx->uring_lock);
-                        io_run_task_work();
-                        mutex_lock(&ctx->uring_lock);
+                                /* some requests don't go through iopoll_list */
+                                if (tail != ctx->cached_cq_tail ||
+                                    wq_list_empty(&ctx->iopoll_list))
+                                        break;
+                        }
                 }
                 ret = io_do_iopoll(ctx, !min);
                 if (ret < 0)

io_uring/io_uring.h

@@ -236,6 +236,12 @@ static inline int io_run_task_work(void)
         return 0;
 }

+static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
+{
+        return test_thread_flag(TIF_NOTIFY_SIGNAL) ||
+                !wq_list_empty(&ctx->work_llist);
+}
+
 static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
 {
         int ret = 0;