io_uring: remove allocation cache array
We have several layers of request allocation; remove the last one, the submit->reqs array, and always use submit->free_list instead.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/8547095c35f7a87bab14f6447ecd30a273ed7500.1632516769.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6f33b0bc4e
commit 3ab665b74e
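
The change replaces a fixed-size pointer array (state->reqs plus a free_reqs counter) with the intrusive free_list that io_kiocb is already linked into via its inflight_entry member. Below is a minimal, self-contained userspace sketch of that pattern, not kernel code: struct request, cache_get() and cache_put() are hypothetical names, and the hand-rolled list helpers stand in for the kernel's <linux/list.h>.

#include <stddef.h>
#include <stdio.h>

/* Minimal intrusive list, modeled loosely on the kernel's struct list_head. */
struct list_head { struct list_head *next, *prev; };

static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->next = n->prev = NULL;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical request type; io_kiocb reuses its inflight_entry the same way. */
struct request {
	int id;
	struct list_head cache_entry;
};

static struct list_head free_list = { &free_list, &free_list };

/* Before the patch the cache was a second layer on top of the list:
 *	void *reqs[IO_REQ_CACHE_SIZE];
 *	unsigned int free_reqs;
 * After it, the list alone carries the cache, with no capacity limit. */

static void cache_put(struct request *req)
{
	list_add(&req->cache_entry, &free_list);	/* no "array full" branch */
}

static struct request *cache_get(void)
{
	struct list_head *n;

	if (list_empty(&free_list))
		return NULL;		/* caller falls back to the allocator */
	n = free_list.next;
	list_del(n);
	return container_of(n, struct request, cache_entry);
}

int main(void)
{
	struct request a = { .id = 1 }, b = { .id = 2 };

	cache_put(&a);
	cache_put(&b);
	printf("got id=%d\n", cache_get()->id);	/* 2: LIFO, like the kernel cache */
	printf("got id=%d\n", cache_get()->id);	/* 1 */
	return 0;
}

Because the cache is the list itself, the free path needs no capacity check and there is no counter to keep in sync with it; that second layer of bookkeeping is exactly what the patch deletes.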
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -310,12 +310,6 @@ struct io_submit_state {
 	struct blk_plug		plug;
 	struct io_submit_link	link;
 
-	/*
-	 * io_kiocb alloc cache
-	 */
-	void			*reqs[IO_REQ_CACHE_SIZE];
-	unsigned int		free_reqs;
-
 	bool			plug_started;
 	bool			need_plug;
 
@@ -1898,7 +1892,6 @@ static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
 static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
 {
 	struct io_submit_state *state = &ctx->submit_state;
-	int nr;
 
 	/*
 	 * If we have more than a batch's worth of requests in our IRQ side
@@ -1907,20 +1900,7 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
 	 */
 	if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
 		io_flush_cached_locked_reqs(ctx, state);
-
-	nr = state->free_reqs;
-	while (!list_empty(&state->free_list)) {
-		struct io_kiocb *req = list_first_entry(&state->free_list,
-					struct io_kiocb, inflight_entry);
-
-		list_del(&req->inflight_entry);
-		state->reqs[nr++] = req;
-		if (nr == ARRAY_SIZE(state->reqs))
-			break;
-	}
-
-	state->free_reqs = nr;
-	return nr != 0;
+	return !list_empty(&state->free_list);
 }
 
 /*
@@ -1934,33 +1914,36 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
 {
 	struct io_submit_state *state = &ctx->submit_state;
 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
+	void *reqs[IO_REQ_ALLOC_BATCH];
+	struct io_kiocb *req;
 	int ret, i;
 
-	BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH);
-
-	if (likely(state->free_reqs || io_flush_cached_reqs(ctx)))
+	if (likely(!list_empty(&state->free_list) || io_flush_cached_reqs(ctx)))
 		goto got_req;
 
-	ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
-				    state->reqs);
+	ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
 
 	/*
 	 * Bulk alloc is all-or-nothing. If we fail to get a batch,
 	 * retry single alloc to be on the safe side.
 	 */
 	if (unlikely(ret <= 0)) {
-		state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
-		if (!state->reqs[0])
+		reqs[0] = kmem_cache_alloc(req_cachep, gfp);
+		if (!reqs[0])
			return NULL;
 		ret = 1;
 	}
 
-	for (i = 0; i < ret; i++)
-		io_preinit_req(state->reqs[i], ctx);
-	state->free_reqs = ret;
+	for (i = 0; i < ret; i++) {
+		req = reqs[i];
+
+		io_preinit_req(req, ctx);
+		list_add(&req->inflight_entry, &state->free_list);
+	}
 got_req:
-	state->free_reqs--;
-	return state->reqs[state->free_reqs];
+	req = list_first_entry(&state->free_list, struct io_kiocb, inflight_entry);
+	list_del(&req->inflight_entry);
+	return req;
 }
 
 static inline void io_put_file(struct file *file)
@@ -2318,10 +2301,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
 	rb->task_refs++;
 	rb->ctx_refs++;
 
-	if (state->free_reqs != ARRAY_SIZE(state->reqs))
-		state->reqs[state->free_reqs++] = req;
-	else
-		list_add(&req->inflight_entry, &state->free_list);
+	list_add(&req->inflight_entry, &state->free_list);
 }
 
 static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
@@ -9228,12 +9208,6 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
 	struct io_submit_state *state = &ctx->submit_state;
 
 	mutex_lock(&ctx->uring_lock);
-
-	if (state->free_reqs) {
-		kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
-		state->free_reqs = 0;
-	}
-
 	io_flush_cached_locked_reqs(ctx, state);
 	io_req_cache_free(&state->free_list);
 	mutex_unlock(&ctx->uring_lock);
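In the last hunk of io_alloc_req(), the only surviving array is a short-lived stack buffer for batch refill. As a rough userspace analog building on the sketch above (same struct request, cache_get(), cache_put()), the following shows the all-or-nothing batch allocation with single-allocation fallback; alloc_bulk() merely emulates the contract of kmem_cache_alloc_bulk(), and REQ_ALLOC_BATCH is a made-up stand-in for IO_REQ_ALLOC_BATCH.

#include <stdlib.h>

#define REQ_ALLOC_BATCH 8

/* Emulates kmem_cache_alloc_bulk(): returns the whole batch or nothing. */
static int alloc_bulk(size_t size, size_t nr, void **out)
{
	for (size_t i = 0; i < nr; i++) {
		out[i] = malloc(size);
		if (!out[i]) {
			while (i--)		/* roll back the partial batch */
				free(out[i]);
			return 0;
		}
	}
	return (int)nr;
}

static struct request *alloc_req(void)
{
	void *reqs[REQ_ALLOC_BATCH];	/* stack-local scratch, not a cache */
	struct request *req;
	int ret, i;

	req = cache_get();
	if (req)
		return req;

	ret = alloc_bulk(sizeof(*req), REQ_ALLOC_BATCH, reqs);
	if (ret <= 0) {
		/* Bulk alloc is all-or-nothing; retry a single allocation. */
		reqs[0] = malloc(sizeof(*req));
		if (!reqs[0])
			return NULL;
		ret = 1;
	}
	for (i = 0; i < ret; i++)
		cache_put(reqs[i]);	/* park the whole batch on the free list */
	return cache_get();
}

The shape mirrors the patched kernel function: refill feeds every request straight onto the free list and then pops one back off, so the hot path has a single source of truth for cached requests.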