mirror of
https://github.com/torvalds/linux.git
synced 2024-11-21 19:41:42 +00:00
io_uring: move max entry definition and ring sizing into header
In preparation for needing this somewhere else, move the definitions for the maximum CQ and SQ ring size into io_uring.h. Make the rings_size() helper available as well, and have it take just the setup flags argument rather than the full ring context pointer. That's all that is needed. Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
882dec6c39
commit
09d0a8ea7f
@ -105,9 +105,6 @@
|
||||
#include "alloc_cache.h"
|
||||
#include "eventfd.h"
|
||||
|
||||
#define IORING_MAX_ENTRIES 32768
|
||||
#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
|
||||
|
||||
#define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
|
||||
IOSQE_IO_HARDLINK | IOSQE_ASYNC)
|
||||
|
||||
@ -2667,8 +2664,8 @@ static void io_rings_free(struct io_ring_ctx *ctx)
|
||||
ctx->sq_sqes = NULL;
|
||||
}
|
||||
|
||||
static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries,
|
||||
unsigned int cq_entries, size_t *sq_offset)
|
||||
unsigned long rings_size(unsigned int flags, unsigned int sq_entries,
|
||||
unsigned int cq_entries, size_t *sq_offset)
|
||||
{
|
||||
struct io_rings *rings;
|
||||
size_t off, sq_array_size;
|
||||
@ -2676,7 +2673,7 @@ static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries
|
||||
off = struct_size(rings, cqes, cq_entries);
|
||||
if (off == SIZE_MAX)
|
||||
return SIZE_MAX;
|
||||
if (ctx->flags & IORING_SETUP_CQE32) {
|
||||
if (flags & IORING_SETUP_CQE32) {
|
||||
if (check_shl_overflow(off, 1, &off))
|
||||
return SIZE_MAX;
|
||||
}
|
||||
@ -2687,7 +2684,7 @@ static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries
|
||||
return SIZE_MAX;
|
||||
#endif
|
||||
|
||||
if (ctx->flags & IORING_SETUP_NO_SQARRAY) {
|
||||
if (flags & IORING_SETUP_NO_SQARRAY) {
|
||||
*sq_offset = SIZE_MAX;
|
||||
return off;
|
||||
}
|
||||
@ -3434,7 +3431,8 @@ static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
|
||||
ctx->sq_entries = p->sq_entries;
|
||||
ctx->cq_entries = p->cq_entries;
|
||||
|
||||
size = rings_size(ctx, p->sq_entries, p->cq_entries, &sq_array_offset);
|
||||
size = rings_size(ctx->flags, p->sq_entries, p->cq_entries,
|
||||
&sq_array_offset);
|
||||
if (size == SIZE_MAX)
|
||||
return -EOVERFLOW;
|
||||
|
||||
|
@ -65,6 +65,11 @@ static inline bool io_should_wake(struct io_wait_queue *iowq)
|
||||
return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
|
||||
}
|
||||
|
||||
#define IORING_MAX_ENTRIES 32768
|
||||
#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
|
||||
|
||||
unsigned long rings_size(unsigned int flags, unsigned int sq_entries,
|
||||
unsigned int cq_entries, size_t *sq_offset);
|
||||
bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
|
||||
int io_run_task_work_sig(struct io_ring_ctx *ctx);
|
||||
void io_req_defer_failed(struct io_kiocb *req, s32 res);
|
||||
|
Loading…
Reference in New Issue
Block a user