io_uring/uring_cmd: switch to always allocating async data
Basic conversion ensuring async_data is allocated off the prep path. Adds
a basic alloc cache as well, as passthrough IO can be quite high in rate.

Tested-by: Anuj Gupta <anuj20.g@samsung.com>
Reviewed-by: Anuj Gupta <anuj20.g@samsung.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit d10f19dff5
parent e2ea5a7069
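The conversion follows the alloc-cache pattern io_uring already uses for apoll, netmsg, and rw state: completed objects are parked in a small per-ring stack and handed back out on the next prep, with plain allocation only as the fallback. Below is a minimal userspace sketch of that get/put shape, assuming simplified names (alloc_cache, cache_get, cache_put, CACHE_MAX are illustrative, not the kernel's io_alloc_cache API):

#include <stdlib.h>

/* Simplified model of an alloc cache: a fixed stack of recycled
 * objects; get() pops, put() pushes until the cache is full. */
#define CACHE_MAX 128

struct alloc_cache {
	void *entries[CACHE_MAX];
	unsigned int nr;
	size_t elem_size;
};

static void *cache_get(struct alloc_cache *c)
{
	if (c->nr)
		return c->entries[--c->nr];	/* reuse a cached object */
	return malloc(c->elem_size);		/* cache empty: allocate fresh */
}

static int cache_put(struct alloc_cache *c, void *obj)
{
	if (c->nr < CACHE_MAX) {
		c->entries[c->nr++] = obj;	/* recycle for a later request */
		return 1;
	}
	return 0;	/* cache full: caller keeps ownership and frees it */
}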
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
@@ -301,6 +301,7 @@ struct io_ring_ctx {
 		struct io_alloc_cache	apoll_cache;
 		struct io_alloc_cache	netmsg_cache;
 		struct io_alloc_cache	rw_cache;
+		struct io_alloc_cache	uring_cache;

 		/*
 		 * Any cancelable uring_cmd is added to this list in
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
@@ -313,6 +313,8 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 			    sizeof(struct io_async_msghdr));
 	io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX,
 			    sizeof(struct io_async_rw));
+	io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX,
+			    sizeof(struct uring_cache));
 	io_futex_cache_init(ctx);
 	init_completion(&ctx->ref_comp);
 	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
@@ -2826,6 +2828,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
 	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
 	io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
+	io_alloc_cache_free(&ctx->uring_cache, io_uring_cache_free);
 	io_futex_cache_free(ctx);
 	io_destroy_buffers(ctx);
 	mutex_unlock(&ctx->uring_lock);
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
@@ -677,7 +677,6 @@ const struct io_cold_def io_cold_defs[] = {
 	[IORING_OP_URING_CMD] = {
 		.name			= "URING_CMD",
 		.async_size		= 2 * sizeof(struct io_uring_sqe),
-		.prep_async		= io_uring_cmd_prep_async,
 	},
 	[IORING_OP_SEND_ZC] = {
 		.name			= "SEND_ZC",
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
@@ -14,6 +14,38 @@
 #include "rsrc.h"
 #include "uring_cmd.h"

+static struct uring_cache *io_uring_async_get(struct io_kiocb *req)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	struct io_cache_entry *entry;
+	struct uring_cache *cache;
+
+	entry = io_alloc_cache_get(&ctx->uring_cache);
+	if (entry) {
+		cache = container_of(entry, struct uring_cache, cache);
+		req->flags |= REQ_F_ASYNC_DATA;
+		req->async_data = cache;
+		return cache;
+	}
+	if (!io_alloc_async_data(req))
+		return req->async_data;
+	return NULL;
+}
+
+static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
+{
+	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+	struct uring_cache *cache = req->async_data;
+
+	if (issue_flags & IO_URING_F_UNLOCKED)
+		return;
+	if (io_alloc_cache_put(&req->ctx->uring_cache, &cache->cache)) {
+		ioucmd->sqe = NULL;
+		req->async_data = NULL;
+		req->flags &= ~REQ_F_ASYNC_DATA;
+	}
+}
+
 bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
 				   struct task_struct *task, bool cancel_all)
 {
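The two new helpers above bracket the lifetime of the per-command state: io_uring_async_get() pulls a uring_cache object from the ring's cache (falling back to io_alloc_async_data()) at prep time, and io_req_uring_cleanup() hands it back on completion, but only while the ring lock is held, which is why the put is skipped under IO_URING_F_UNLOCKED. A hedged sketch of that pairing, built on the simplified cache model from the earlier sketch (cmd_state, request, cmd_prep, and cmd_complete are hypothetical names):

#include <errno.h>

/* Hypothetical pairing of the two helpers, against the simplified
 * alloc_cache model above; not kernel code. */
struct cmd_state {
	char sqe_copy[128];	/* stand-in for two struct io_uring_sqe */
};

struct request {
	struct alloc_cache *cache;	/* per-ring cache, guarded by the ring lock */
	struct cmd_state *state;
};

static int cmd_prep(struct request *req)
{
	req->state = cache_get(req->cache);	/* always allocate at prep */
	return req->state ? 0 : -ENOMEM;	/* fail prep, not the issue path */
}

static void cmd_complete(struct request *req, int ring_locked)
{
	if (!ring_locked)
		return;				/* cache is unsafe to touch unlocked */
	if (cache_put(req->cache, req->state))
		req->state = NULL;		/* recycled; nothing left to free here */
}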
@@ -128,6 +160,7 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
 	io_req_set_res(req, ret, 0);
 	if (req->ctx->flags & IORING_SETUP_CQE32)
 		io_req_set_cqe32_extra(req, res2, 0);
+	io_req_uring_cleanup(req, issue_flags);
 	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
 		/* order with io_iopoll_req_issued() checking ->iopoll_complete */
 		smp_store_release(&req->iopoll_completed, 1);
@@ -142,13 +175,19 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_done);

-int io_uring_cmd_prep_async(struct io_kiocb *req)
+static int io_uring_cmd_prep_setup(struct io_kiocb *req,
+				   const struct io_uring_sqe *sqe)
 {
 	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+	struct uring_cache *cache;

-	memcpy(req->async_data, ioucmd->sqe, uring_sqe_size(req->ctx));
-	ioucmd->sqe = req->async_data;
-	return 0;
+	cache = io_uring_async_get(req);
+	if (cache) {
+		memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx));
+		ioucmd->sqe = req->async_data;
+		return 0;
+	}
+	return -ENOMEM;
 }

 int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -173,9 +212,9 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		req->imu = ctx->user_bufs[index];
 		io_req_set_rsrc_node(req, ctx, 0);
 	}
-	ioucmd->sqe = sqe;
 	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
-	return 0;
+
+	return io_uring_cmd_prep_setup(req, sqe);
 }

 int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
@@ -206,23 +245,14 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
 	}

 	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
-	if (ret == -EAGAIN) {
-		if (!req_has_async_data(req)) {
-			if (io_alloc_async_data(req))
-				return -ENOMEM;
-			io_uring_cmd_prep_async(req);
-		}
-		return -EAGAIN;
-	}
-
-	if (ret != -EIOCBQUEUED) {
-		if (ret < 0)
-			req_set_fail(req);
-		io_req_set_res(req, ret, 0);
+	if (ret == -EAGAIN || ret == -EIOCBQUEUED)
 		return ret;
-	}

-	return IOU_ISSUE_SKIP_COMPLETE;
+	if (ret < 0)
+		req_set_fail(req);
+	io_req_uring_cleanup(req, issue_flags);
+	io_req_set_res(req, ret, 0);
+	return ret;
 }

 int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
@@ -311,3 +341,8 @@ int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
 #endif
+
+void io_uring_cache_free(struct io_cache_entry *entry)
+{
+	kfree(container_of(entry, struct uring_cache, cache));
+}
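A side effect of moving the SQE copy into prep: ioucmd->sqe now always points at the stable copy in async_data rather than at the live submission queue entry, so a command handler can read it safely even when the request is punted and re-issued from io-wq. A hypothetical ->uring_cmd() implementation illustrating this (my_cmd and my_driver_uring_cmd are invented for illustration; io_uring_sqe_cmd() is the existing accessor for the SQE's command payload):

#include <linux/types.h>
#include <linux/io_uring/cmd.h>

/* Hypothetical driver command layout; not from any real driver. */
struct my_cmd {
	__u64 addr;
	__u32 len;
};

static int my_driver_uring_cmd(struct io_uring_cmd *ioucmd,
			       unsigned int issue_flags)
{
	/* ioucmd->sqe is now always the copy made at prep time, so it is
	 * valid to dereference even on a deferred re-issue. */
	const struct my_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);

	if (!cmd->len)
		return -EINVAL;

	/* ... start the I/O; -EIOCBQUEUED means it will complete later
	 * via io_uring_cmd_done(). */
	return -EIOCBQUEUED;
}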
diff --git a/io_uring/uring_cmd.h b/io_uring/uring_cmd.h
@@ -1,8 +1,16 @@
 // SPDX-License-Identifier: GPL-2.0

+struct uring_cache {
+	union {
+		struct io_cache_entry cache;
+		struct io_uring_sqe sqes[2];
+	};
+};
+
 int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags);
 int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
-int io_uring_cmd_prep_async(struct io_kiocb *req);
+void io_uring_cache_free(struct io_cache_entry *entry);

 bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
-				   struct task_struct *task, bool cancel_all);
+				   struct task_struct *task, bool cancel_all);
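struct uring_cache uses the usual intrusive free-list trick: while an object is parked in the cache its sqes[] bytes are dead, so the io_cache_entry link overlays the same storage and caching adds zero bytes per object; container_of() recovers the uring_cache from the entry on the get side. A generic sketch of that overlay, with invented names (cache_entry, cached_obj):

#include <stdio.h>

/* The link used while the object sits in a cache/free list. */
struct cache_entry {
	struct cache_entry *next;
};

/* Payload and link share storage: the link is only live while the
 * payload is not, so the union costs nothing per object. */
struct cached_obj {
	union {
		struct cache_entry entry;	/* valid while cached */
		char payload[128];		/* valid while in use */
	};
};

int main(void)
{
	/* sizeof is just the payload; the link adds no overhead */
	printf("%zu\n", sizeof(struct cached_obj));	/* prints 128 */
	return 0;
}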