mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 20:22:09 +00:00
io_uring: add CQE32 completion processing
This adds the completion processing for the large CQEs and makes sure
that the extra1 and extra2 fields are passed through.

Co-developed-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Stefan Roesch <shr@fb.com>
Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
Link: https://lore.kernel.org/r/20220426182134.136504-6-shr@fb.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent 916587984f
commit effcf8bdeb
fs/io_uring.c

@@ -2407,18 +2407,15 @@ static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
 	return __io_fill_cqe(ctx, user_data, res, cflags);
 }
 
-static void __io_req_complete_post(struct io_kiocb *req, s32 res,
-				   u32 cflags)
+static void __io_req_complete_put(struct io_kiocb *req)
 {
-	struct io_ring_ctx *ctx = req->ctx;
-
-	if (!(req->flags & REQ_F_CQE_SKIP))
-		__io_fill_cqe_req(req, res, cflags);
 	/*
 	 * If we're the last reference to this request, add to our locked
 	 * free_list cache.
 	 */
 	if (req_ref_put_and_test(req)) {
+		struct io_ring_ctx *ctx = req->ctx;
+
 		if (req->flags & IO_REQ_LINK_FLAGS) {
 			if (req->flags & IO_DISARM_MASK)
 				io_disarm_next(req);
@@ -2441,8 +2438,23 @@ static void __io_req_complete_post(struct io_kiocb *req, s32 res,
 	}
 }
 
-static void io_req_complete_post(struct io_kiocb *req, s32 res,
-				 u32 cflags)
+static void __io_req_complete_post(struct io_kiocb *req, s32 res,
+				   u32 cflags)
+{
+	if (!(req->flags & REQ_F_CQE_SKIP))
+		__io_fill_cqe_req(req, res, cflags);
+	__io_req_complete_put(req);
+}
+
+static void __io_req_complete_post32(struct io_kiocb *req, s32 res,
+				     u32 cflags, u64 extra1, u64 extra2)
+{
+	if (!(req->flags & REQ_F_CQE_SKIP))
+		__io_fill_cqe32_req(req, res, cflags, extra1, extra2);
+	__io_req_complete_put(req);
+}
+
+static void io_req_complete_post(struct io_kiocb *req, s32 res, u32 cflags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
@@ -2453,6 +2465,18 @@ static void io_req_complete_post(struct io_kiocb *req, s32 res,
 	io_cqring_ev_posted(ctx);
 }
 
+static void io_req_complete_post32(struct io_kiocb *req, s32 res,
+				   u32 cflags, u64 extra1, u64 extra2)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+
+	spin_lock(&ctx->completion_lock);
+	__io_req_complete_post32(req, res, cflags, extra1, extra2);
+	io_commit_cqring(ctx);
+	spin_unlock(&ctx->completion_lock);
+	io_cqring_ev_posted(ctx);
+}
+
 static inline void io_req_complete_state(struct io_kiocb *req, s32 res,
 					 u32 cflags)
 {
@@ -2470,6 +2494,19 @@ static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
 	io_req_complete_post(req, res, cflags);
 }
 
+static inline void __io_req_complete32(struct io_kiocb *req,
+				       unsigned int issue_flags, s32 res,
+				       u32 cflags, u64 extra1, u64 extra2)
+{
+	if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
+		io_req_complete_state(req, res, cflags);
+		req->extra1 = extra1;
+		req->extra2 = extra2;
+	} else {
+		io_req_complete_post32(req, res, cflags, extra1, extra2);
+	}
+}
+
 static inline void io_req_complete(struct io_kiocb *req, s32 res)
 {
 	__io_req_complete(req, 0, res, 0);
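
To see how the new helpers are meant to be called in-kernel, here is a
hypothetical opcode handler. This is a sketch only: io_mycmd() and its
payload values are invented for illustration; only __io_req_complete32()
and its signature come from this patch.

/* Hypothetical opcode handler, not part of this commit. */
static int io_mycmd(struct io_kiocb *req, unsigned int issue_flags)
{
	u64 extra1 = 1, extra2 = 2;	/* opcode-specific payload to return */

	/*
	 * On a CQE32 ring this posts a 32-byte CQE carrying extra1/extra2;
	 * with IO_URING_F_COMPLETE_DEFER the values are stashed in
	 * req->extra1/req->extra2 and flushed with the completion batch.
	 */
	__io_req_complete32(req, issue_flags, 0, 0, extra1, extra2);
	return 0;
}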
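
On the userspace side, a ring created with IORING_SETUP_CQE32 receives the
two extra fields in the big CQE. A minimal liburing sketch, assuming a
liburing build with CQE32 support; the NOP opcode is only a stand-in
submission and does not populate the extra fields:

#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	/* IORING_SETUP_CQE32 doubles each CQE to 32 bytes. */
	if (io_uring_queue_init(8, &ring, IORING_SETUP_CQE32) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		/* extra1/extra2 from the kernel land in big_cqe[0]/[1]. */
		printf("res=%d extra1=%llu extra2=%llu\n", cqe->res,
		       (unsigned long long)cqe->big_cqe[0],
		       (unsigned long long)cqe->big_cqe[1]);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}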