io_uring: overflow processing for CQE32
This adds overflow processing for large CQEs. Two parameters are added to the io_cqring_event_overflow function and used to initialize the large CQE fields. Enough space for large CQEs is allocated in the overflow structure; if no large CQEs are used, the size of the allocation is unchanged. The cqe field can have a different size depending on whether it is a large CQE or not. To be able to allocate different sizes, the two fields in the structure are re-ordered.

Co-developed-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Stefan Roesch <shr@fb.com>
Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
Link: https://lore.kernel.org/r/20220426182134.136504-9-shr@fb.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 0e2e5c47fe
commit e45a3e0500
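The field reorder matters because the 32-byte CQE carries its two extra 64-bit words in a flexible trailing array, so the CQE can only grow if it is the last member of the overflow entry. Below is a minimal userspace sketch of that allocation pattern, not the kernel code itself: the type names and the list stand-in are illustrative, and nesting a flexible-array struct as the last member relies on the same compiler extension the kernel uses.

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Illustrative stand-in for struct io_uring_cqe: 16 bytes fixed,
	 * plus two extra u64s when the ring uses 32-byte CQEs. */
	struct cqe {
		uint64_t user_data;
		int32_t  res;
		uint32_t flags;
		uint64_t big_cqe[];	/* only present on CQE32 rings */
	};

	/* Illustrative stand-in for struct io_overflow_cqe: the CQE must be
	 * the last member so the allocation below can extend it. */
	struct overflow_cqe {
		struct overflow_cqe *next;	/* stand-in for the list_head linkage */
		struct cqe cqe;
	};

	/* Mirrors the allocation logic the patch adds: the entry grows by
	 * one CQE's worth of bytes only when the ring uses 32-byte CQEs. */
	static struct overflow_cqe *alloc_overflow(int is_cqe32, uint64_t user_data,
						   int32_t res, uint32_t flags,
						   uint64_t extra1, uint64_t extra2)
	{
		size_t ocq_size = sizeof(struct overflow_cqe);
		struct overflow_cqe *ocqe;

		if (is_cqe32)
			ocq_size += sizeof(struct cqe);

		ocqe = calloc(1, ocq_size);
		if (!ocqe)
			return NULL;

		ocqe->cqe.user_data = user_data;
		ocqe->cqe.res = res;
		ocqe->cqe.flags = flags;
		if (is_cqe32) {
			ocqe->cqe.big_cqe[0] = extra1;
			ocqe->cqe.big_cqe[1] = extra2;
		}
		return ocqe;
	}

	int main(void)
	{
		struct overflow_cqe *small = alloc_overflow(0, 1, 0, 0, 0, 0);
		struct overflow_cqe *big = alloc_overflow(1, 2, 0, 0, 0xaa, 0xbb);

		printf("entry sizes: %zu (CQE16) vs %zu (CQE32)\n",
		       sizeof(struct overflow_cqe),
		       sizeof(struct overflow_cqe) + sizeof(struct cqe));
		free(small);
		free(big);
		return 0;
	}

Because the list linkage now precedes the CQE, the same allocation covers both ring formats without wasting space on rings that never produce big CQEs.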
@@ -220,8 +220,8 @@ struct io_mapped_ubuf {
 struct io_ring_ctx;
 
 struct io_overflow_cqe {
-	struct io_uring_cqe cqe;
 	struct list_head list;
+	struct io_uring_cqe cqe;
 };
 
 /*
@@ -2177,10 +2177,14 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 {
 	bool all_flushed, posted;
+	size_t cqe_size = sizeof(struct io_uring_cqe);
 
 	if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
 		return false;
 
+	if (ctx->flags & IORING_SETUP_CQE32)
+		cqe_size <<= 1;
+
 	posted = false;
 	spin_lock(&ctx->completion_lock);
 	while (!list_empty(&ctx->cq_overflow_list)) {
@@ -2192,7 +2196,7 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 		ocqe = list_first_entry(&ctx->cq_overflow_list,
 					struct io_overflow_cqe, list);
 		if (cqe)
-			memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
+			memcpy(cqe, &ocqe->cqe, cqe_size);
 		else
 			io_account_cq_overflow(ctx);
 
@@ -2280,11 +2284,17 @@ static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
 }
 
 static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
-				     s32 res, u32 cflags)
+				     s32 res, u32 cflags, u64 extra1,
+				     u64 extra2)
 {
 	struct io_overflow_cqe *ocqe;
+	size_t ocq_size = sizeof(struct io_overflow_cqe);
+	bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
 
-	ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
+	if (is_cqe32)
+		ocq_size += sizeof(struct io_uring_cqe);
+
+	ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT);
 	trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
 	if (!ocqe) {
 		/*
@@ -2304,6 +2314,10 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
 	ocqe->cqe.user_data = user_data;
 	ocqe->cqe.res = res;
 	ocqe->cqe.flags = cflags;
+	if (is_cqe32) {
+		ocqe->cqe.big_cqe[0] = extra1;
+		ocqe->cqe.big_cqe[1] = extra2;
+	}
 	list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
 	return true;
 }
@@ -2325,7 +2339,7 @@ static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
 		WRITE_ONCE(cqe->flags, cflags);
 		return true;
 	}
-	return io_cqring_event_overflow(ctx, user_data, res, cflags);
+	return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
 }
 
 static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx,
@@ -2347,7 +2361,7 @@ static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx,
 		return true;
 	}
 	return io_cqring_event_overflow(ctx, req->cqe.user_data,
-					req->cqe.res, req->cqe.flags);
+					req->cqe.res, req->cqe.flags, 0, 0);
 }
 
 static inline bool __io_fill_cqe32_req_filled(struct io_ring_ctx *ctx,
@@ -2373,8 +2387,8 @@ static inline bool __io_fill_cqe32_req_filled(struct io_ring_ctx *ctx,
 		return true;
 	}
 
-	return io_cqring_event_overflow(ctx, req->cqe.user_data,
-					req->cqe.res, req->cqe.flags);
+	return io_cqring_event_overflow(ctx, req->cqe.user_data, req->cqe.res,
+					req->cqe.flags, extra1, extra2);
 }
 
 static inline bool __io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
@@ -2411,7 +2425,7 @@ static inline void __io_fill_cqe32_req(struct io_kiocb *req, s32 res, u32 cflags
 		return;
 	}
 
-	io_cqring_event_overflow(ctx, req->cqe.user_data, res, cflags);
+	io_cqring_event_overflow(ctx, req->cqe.user_data, res, cflags, extra1, extra2);
 }
 
 static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
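On the flush side, a complementary sketch of what the second and third hunks do: the per-entry copy size is derived once from the ring flags, so a single memcpy() moves either a 16-byte or a 32-byte completion back into the CQ ring. This is a minimal illustration under stated assumptions; the flag value and the names cq_slot and ring_flags are stand-ins, not the kernel's identifiers.

	#include <stdint.h>
	#include <string.h>

	#define SETUP_CQE32	(1U << 11)	/* illustrative stand-in for IORING_SETUP_CQE32 */

	struct cqe16 {
		uint64_t user_data;
		int32_t  res;
		uint32_t flags;
	};

	/* Copy one overflowed CQE into a CQ ring slot.  On CQE32 rings both the
	 * stored entry and the ring slot are twice as large, so doubling the copy
	 * size carries the two extra 64-bit fields along with the fixed part. */
	void flush_one(void *cq_slot, const void *stored, uint32_t ring_flags)
	{
		size_t cqe_size = sizeof(struct cqe16);	/* 16 bytes */

		if (ring_flags & SETUP_CQE32)
			cqe_size <<= 1;			/* 32 bytes */

		memcpy(cq_slot, stored, cqe_size);
	}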