io_uring: remove async request cache
io_req_complete_post() was the sole user of ->locked_free_list, but since
we just gutted the function, the cache is not used anymore and can be
removed.

->locked_free_list served as an asynchronous counterpart of the main
request (i.e. struct io_kiocb) cache for all unlocked cases like io-wq.
Now they're all forced to be completed into the main cache directly,
off of the normal completion path or via io_free_req().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/7bffccd213e370abd4de480e739d8b08ab6c1326.1712331455.git.asml.silence@gmail.com
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit d9713ad3fa
parent de96e9ae69
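For context, a minimal standalone sketch of the pattern being removed: asynchronous completion paths pushed freed requests onto a spinlock-protected list, which the submission side periodically spliced into its lock-free cache. All names below (struct ctx, req_free_locked(), flush_locked_reqs()) are illustrative, not the kernel's; the real code uses struct io_wq_work_list, ->completion_lock, and wq_list_splice().

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
	struct req *next;
};

struct ctx {
	pthread_spinlock_t lock;	/* stands in for ->completion_lock */
	struct req *locked_free;	/* async side, protected by lock */
	unsigned int locked_free_nr;
	struct req *free_list;		/* submission side, single-threaded */
};

/* Async completion path (e.g. io-wq): park the request on the locked list. */
static void req_free_locked(struct ctx *ctx, struct req *req)
{
	pthread_spin_lock(&ctx->lock);
	req->next = ctx->locked_free;
	ctx->locked_free = req;
	ctx->locked_free_nr++;
	pthread_spin_unlock(&ctx->lock);
}

/* Submission path: splice the locked list into the local free cache. */
static void flush_locked_reqs(struct ctx *ctx)
{
	struct req *head;

	pthread_spin_lock(&ctx->lock);
	head = ctx->locked_free;
	ctx->locked_free = NULL;
	ctx->locked_free_nr = 0;
	pthread_spin_unlock(&ctx->lock);

	while (head) {		/* no lock needed past this point */
		struct req *next = head->next;

		head->next = ctx->free_list;
		ctx->free_list = head;
		head = next;
	}
}

int main(void)
{
	struct ctx ctx = { .locked_free = NULL, .free_list = NULL };
	struct req *req = malloc(sizeof(*req));

	pthread_spin_init(&ctx.lock, PTHREAD_PROCESS_PRIVATE);
	req_free_locked(&ctx, req);	/* "async" free */
	flush_locked_reqs(&ctx);	/* reclaim on the submission side */
	printf("cached: %s\n", ctx.free_list ? "yes" : "no");
	free(ctx.free_list);
	return 0;
}

In the kernel the flush only happened when a racily-read counter exceeded a batch threshold, as the __io_alloc_req_refill() hunk below shows; after this commit those paths feed the main cache directly.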
@@ -346,10 +346,6 @@ struct io_ring_ctx {
 
 	spinlock_t		completion_lock;
 
-	/* IRQ completion list, under ->completion_lock */
-	unsigned int		locked_free_nr;
-	struct io_wq_work_list	locked_free_list;
-
 	struct list_head	io_buffers_comp;
 	struct list_head	cq_overflow_list;
 	struct io_hash_table	cancel_table;
@@ -334,7 +334,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	init_llist_head(&ctx->work_llist);
 	INIT_LIST_HEAD(&ctx->tctx_list);
 	ctx->submit_state.free_list.next = NULL;
-	INIT_WQ_LIST(&ctx->locked_free_list);
 	INIT_HLIST_HEAD(&ctx->waitid_list);
 #ifdef CONFIG_FUTEX
 	INIT_HLIST_HEAD(&ctx->futex_list);
@@ -988,15 +987,6 @@ static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
 	memset(&req->big_cqe, 0, sizeof(req->big_cqe));
 }
 
-static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
-					struct io_submit_state *state)
-{
-	spin_lock(&ctx->completion_lock);
-	wq_list_splice(&ctx->locked_free_list, &state->free_list);
-	ctx->locked_free_nr = 0;
-	spin_unlock(&ctx->completion_lock);
-}
-
 /*
  * A request might get retired back into the request caches even before opcode
  * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
@@ -1010,17 +1000,6 @@ __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
 	void *reqs[IO_REQ_ALLOC_BATCH];
 	int ret;
 
-	/*
-	 * If we have more than a batch's worth of requests in our IRQ side
-	 * locked cache, grab the lock and move them over to our submission
-	 * side cache.
-	 */
-	if (data_race(ctx->locked_free_nr) > IO_COMPL_BATCH) {
-		io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
-		if (!io_req_cache_empty(ctx))
-			return true;
-	}
-
 	ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
 
 	/*
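The deleted fast path above read ->locked_free_nr without holding the lock; data_race() is the kernel's KCSAN annotation marking that lockless peek as intentional. A minimal sketch of the peek-then-lock idiom, using a relaxed C11 atomic in place of data_race(); all names here are illustrative stand-ins, not the kernel's:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define COMPL_BATCH 32	/* stand-in for IO_COMPL_BATCH */

struct cache {
	pthread_mutex_t lock;
	atomic_uint locked_free_nr;	/* written under lock, peeked without */
};

static bool maybe_flush(struct cache *c)
{
	/* Racy peek: cheap and possibly stale. A stale read only costs a
	 * missed (or spurious) flush attempt, never correctness, because
	 * the real work below happens with the lock held. */
	if (atomic_load_explicit(&c->locked_free_nr,
				 memory_order_relaxed) <= COMPL_BATCH)
		return false;

	pthread_mutex_lock(&c->lock);
	/* ... splice the locked list into the local cache ... */
	atomic_store_explicit(&c->locked_free_nr, 0, memory_order_relaxed);
	pthread_mutex_unlock(&c->lock);
	return true;
}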
@@ -2654,7 +2633,6 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
 	int nr = 0;
 
 	mutex_lock(&ctx->uring_lock);
-	io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
 
 	while (!io_req_cache_empty(ctx)) {
 		req = io_extract_req(ctx);