io_uring: abstract out a bit of the ring filling logic
Abstract out an io_uring_fill_params() helper, which fills out the
necessary bits of struct io_uring_params. Add it to io_uring.h as well,
in preparation for having another internal user of it.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 81d8191eb9
parent 09d0a8ea7f
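For context, the sq_off/cq_off values this patch moves into io_uring_fill_params() are part of the userspace ABI: io_uring_setup(2) copies the filled io_uring_params back to the caller, which adds each offset to the mmap()ed ring base to locate the shared fields. A minimal userspace sketch of that consumption (error handling trimmed, plain loads instead of the acquire/release accesses real code needs; assumes a kernel with io_uring and <linux/io_uring.h>):

#include <linux/io_uring.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        struct io_uring_params p;
        unsigned *sq_head, *sq_tail;
        void *sq_ring;
        int fd;

        memset(&p, 0, sizeof(p));
        fd = syscall(__NR_io_uring_setup, 8, &p);       /* kernel fills p */
        if (fd < 0)
                return 1;

        /* Map the SQ ring; the offsets returned in p.sq_off locate the
         * individual fields inside this shared region. */
        sq_ring = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(unsigned),
                       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
                       fd, IORING_OFF_SQ_RING);
        if (sq_ring == MAP_FAILED)
                return 1;

        sq_head = (unsigned *)((char *)sq_ring + p.sq_off.head);
        sq_tail = (unsigned *)((char *)sq_ring + p.sq_off.tail);
        printf("sq head=%u tail=%u entries=%u\n", *sq_head, *sq_tail,
               p.sq_entries);
        return 0;
}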
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -3498,14 +3498,8 @@ static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
                                         O_RDWR | O_CLOEXEC, NULL);
 }
 
-static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
-                                 struct io_uring_params __user *params)
+int io_uring_fill_params(unsigned entries, struct io_uring_params *p)
 {
-       struct io_ring_ctx *ctx;
-       struct io_uring_task *tctx;
-       struct file *file;
-       int ret;
-
        if (!entries)
                return -EINVAL;
        if (entries > IORING_MAX_ENTRIES) {
@@ -3547,6 +3541,42 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
                p->cq_entries = 2 * p->sq_entries;
        }
 
+       p->sq_off.head = offsetof(struct io_rings, sq.head);
+       p->sq_off.tail = offsetof(struct io_rings, sq.tail);
+       p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
+       p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
+       p->sq_off.flags = offsetof(struct io_rings, sq_flags);
+       p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
+       p->sq_off.resv1 = 0;
+       if (!(p->flags & IORING_SETUP_NO_MMAP))
+               p->sq_off.user_addr = 0;
+
+       p->cq_off.head = offsetof(struct io_rings, cq.head);
+       p->cq_off.tail = offsetof(struct io_rings, cq.tail);
+       p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
+       p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
+       p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
+       p->cq_off.cqes = offsetof(struct io_rings, cqes);
+       p->cq_off.flags = offsetof(struct io_rings, cq_flags);
+       p->cq_off.resv1 = 0;
+       if (!(p->flags & IORING_SETUP_NO_MMAP))
+               p->cq_off.user_addr = 0;
+
+       return 0;
+}
+
+static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
+                                 struct io_uring_params __user *params)
+{
+       struct io_ring_ctx *ctx;
+       struct io_uring_task *tctx;
+       struct file *file;
+       int ret;
+
+       ret = io_uring_fill_params(entries, p);
+       if (unlikely(ret))
+               return ret;
+
        ctx = io_ring_ctx_alloc(p);
        if (!ctx)
                return -ENOMEM;
@@ -3630,6 +3660,9 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
        if (ret)
                goto err;
 
+       if (!(p->flags & IORING_SETUP_NO_SQARRAY))
+               p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
+
        ret = io_sq_offload_create(ctx, p);
        if (ret)
                goto err;
@@ -3638,29 +3671,6 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
        if (ret)
                goto err;
 
-       p->sq_off.head = offsetof(struct io_rings, sq.head);
-       p->sq_off.tail = offsetof(struct io_rings, sq.tail);
-       p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
-       p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
-       p->sq_off.flags = offsetof(struct io_rings, sq_flags);
-       p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
-       if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
-               p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
-       p->sq_off.resv1 = 0;
-       if (!(ctx->flags & IORING_SETUP_NO_MMAP))
-               p->sq_off.user_addr = 0;
-
-       p->cq_off.head = offsetof(struct io_rings, cq.head);
-       p->cq_off.tail = offsetof(struct io_rings, cq.tail);
-       p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
-       p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
-       p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
-       p->cq_off.cqes = offsetof(struct io_rings, cqes);
-       p->cq_off.flags = offsetof(struct io_rings, cq_flags);
-       p->cq_off.resv1 = 0;
-       if (!(ctx->flags & IORING_SETUP_NO_MMAP))
-               p->cq_off.user_addr = 0;
-
        p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
                        IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
                        IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -70,6 +70,7 @@ static inline bool io_should_wake(struct io_wait_queue *iowq)
 
 unsigned long rings_size(unsigned int flags, unsigned int sq_entries,
                         unsigned int cq_entries, size_t *sq_offset);
+int io_uring_fill_params(unsigned entries, struct io_uring_params *p);
 bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
 void io_req_defer_failed(struct io_kiocb *req, s32 res);