io_uring: fill extra big cqe fields from req
The only user of the io_req_complete32()-like functions is cmd
requests. Instead of keeping the whole complete32 family, remove it and
provide the extras via req->extra{1,2}, which were already added for
inline completions. When fill_cqe_res() finds the CQE32 option enabled,
it will use those fields to fill a 32B cqe.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/af1319eb661b1f9a0abceb51cbbf72b8002e019d.1655287457.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 29ede2014c
parent f43de1f888
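Background for readers new to big CQEs: with IORING_SETUP_CQE32 every
completion entry grows from 16 to 32 bytes, and the two trailing u64s
(the uapi big_cqe[] field) carry exactly the values this patch stashes
in req->extra1/req->extra2. The sketch below shows the userspace view
through liburing; it is illustrative only and not part of this commit,
assumes liburing 2.2+ on a kernel with CQE32 support, and submits a NOP
in place of a real IORING_OP_URING_CMD, so big_cqe[] simply reads back
as zero here.

	#include <liburing.h>
	#include <stdio.h>

	int main(void)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;

		/* IORING_SETUP_CQE32 doubles every CQE from 16 to 32 bytes */
		if (io_uring_queue_init(8, &ring, IORING_SETUP_CQE32) < 0)
			return 1;

		/* stand-in for a real IORING_OP_URING_CMD submission */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_nop(sqe);
		io_uring_submit(&ring);

		if (!io_uring_wait_cqe(&ring, &cqe)) {
			/* big_cqe[0]/[1] are req->extra1/extra2 (res2 for cmds) */
			printf("res=%d extra1=%llu extra2=%llu\n", cqe->res,
			       (unsigned long long)cqe->big_cqe[0],
			       (unsigned long long)cqe->big_cqe[1]);
			io_uring_cqe_seen(&ring, cqe);
		}

		io_uring_queue_exit(&ring);
		return 0;
	}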
diff --git a/fs/io_uring.c b/fs/io_uring.c
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2513,33 +2513,6 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
 	}
 }
 
-static inline bool __io_fill_cqe32_req(struct io_ring_ctx *ctx,
-				       struct io_kiocb *req)
-{
-	struct io_uring_cqe *cqe;
-	u64 extra1 = req->extra1;
-	u64 extra2 = req->extra2;
-
-	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
-				req->cqe.res, req->cqe.flags, extra1, extra2);
-
-	/*
-	 * If we can't get a cq entry, userspace overflowed the
-	 * submission (by quite a lot). Increment the overflow count in
-	 * the ring.
-	 */
-	cqe = io_get_cqe(ctx);
-	if (likely(cqe)) {
-		memcpy(cqe, &req->cqe, sizeof(struct io_uring_cqe));
-		cqe->big_cqe[0] = extra1;
-		cqe->big_cqe[1] = extra2;
-		return true;
-	}
-
-	return io_cqring_event_overflow(ctx, req->cqe.user_data, req->cqe.res,
-					req->cqe.flags, extra1, extra2);
-}
-
 static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
 				     s32 res, u32 cflags)
 {
@@ -2590,19 +2563,6 @@ static void __io_req_complete_post(struct io_kiocb *req, s32 res,
 	__io_req_complete_put(req);
 }
 
-static void __io_req_complete_post32(struct io_kiocb *req, s32 res,
-				     u32 cflags, u64 extra1, u64 extra2)
-{
-	if (!(req->flags & REQ_F_CQE_SKIP)) {
-		req->cqe.res = res;
-		req->cqe.flags = cflags;
-		req->extra1 = extra1;
-		req->extra2 = extra2;
-		__io_fill_cqe32_req(req->ctx, req);
-	}
-	__io_req_complete_put(req);
-}
-
 static void io_req_complete_post(struct io_kiocb *req, s32 res, u32 cflags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -2614,18 +2574,6 @@ static void io_req_complete_post(struct io_kiocb *req, s32 res, u32 cflags)
 	io_cqring_ev_posted(ctx);
 }
 
-static void io_req_complete_post32(struct io_kiocb *req, s32 res,
-				   u32 cflags, u64 extra1, u64 extra2)
-{
-	struct io_ring_ctx *ctx = req->ctx;
-
-	spin_lock(&ctx->completion_lock);
-	__io_req_complete_post32(req, res, cflags, extra1, extra2);
-	io_commit_cqring(ctx);
-	spin_unlock(&ctx->completion_lock);
-	io_cqring_ev_posted(ctx);
-}
-
 static inline void io_req_complete_state(struct io_kiocb *req, s32 res,
 					 u32 cflags)
 {
@@ -2643,19 +2591,6 @@ static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
 	io_req_complete_post(req, res, cflags);
 }
 
-static inline void __io_req_complete32(struct io_kiocb *req,
-				       unsigned int issue_flags, s32 res,
-				       u32 cflags, u64 extra1, u64 extra2)
-{
-	if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
-		io_req_complete_state(req, res, cflags);
-		req->extra1 = extra1;
-		req->extra2 = extra2;
-	} else {
-		io_req_complete_post32(req, res, cflags, extra1, extra2);
-	}
-}
-
 static inline void io_req_complete(struct io_kiocb *req, s32 res)
 {
 	if (res < 0)
@@ -5079,6 +5014,13 @@ void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task);
 
+static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
+					  u64 extra1, u64 extra2)
+{
+	req->extra1 = extra1;
+	req->extra2 = extra2;
+}
+
 /*
  * Called by consumers of io_uring_cmd, if they originally returned
  * -EIOCBQUEUED upon receiving the command.
@@ -5089,10 +5031,10 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
 
 	if (ret < 0)
 		req_set_fail(req);
 
 	if (req->ctx->flags & IORING_SETUP_CQE32)
-		__io_req_complete32(req, 0, ret, 0, res2, 0);
-	else
-		io_req_complete(req, ret);
+		io_req_set_cqe32_extra(req, res2, 0);
+
+	io_req_complete(req, ret);
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_done);
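For illustration of the consumer contract spelled out in the comment
above (return -EIOCBQUEUED from ->uring_cmd(), finish later via
io_uring_cmd_done()), here is a hedged sketch of a provider written
against the io_uring_cmd_done() signature as of this commit. The
mydev_* names and the deferred-completion plumbing are hypothetical,
not from the tree; only the entry points and the res2 semantics come
from the patch itself.

	#include <linux/module.h>
	#include <linux/fs.h>
	#include <linux/io_uring.h>

	/* hypothetical state kept alive until the async work finishes */
	struct mydev_cmd {
		struct io_uring_cmd *ioucmd;
	};

	/* called from the driver's completion path (irq or workqueue) */
	static void mydev_cmd_finish(struct mydev_cmd *cmd, int err, u64 result)
	{
		/*
		 * err < 0 marks the request failed; on CQE32 rings, "result"
		 * (res2) is stashed via io_req_set_cqe32_extra() and surfaces
		 * to userspace as cqe->big_cqe[0].
		 */
		io_uring_cmd_done(cmd->ioucmd, err, result);
	}

	static int mydev_uring_cmd(struct io_uring_cmd *ioucmd,
				   unsigned int issue_flags)
	{
		/* stash ioucmd, kick off async work, complete later via
		 * mydev_cmd_finish(); elided in this sketch */
		return -EIOCBQUEUED;
	}

	static const struct file_operations mydev_fops = {
		.owner		= THIS_MODULE,
		.uring_cmd	= mydev_uring_cmd,
	};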