io_uring: restore back registered wait arguments
Now that we have a more generic region registration API, bring IORING_ENTER_EXT_ARG_REG back and re-enable it.

First, the user has to register a region with the IORING_MEM_REGION_REG_WAIT_ARG flag set. This can only be done while the ring is in a disabled state, aka IORING_SETUP_R_DISABLED, to avoid races with already running waiters. With that we should have stable, constant values for ctx->cq_wait_{size,arg} in io_get_ext_arg_reg(), and hence no READ_ONCE is required.

The other API difference is that we now pass byte offsets instead of indexes. The user _must_ align all offsets / pointers to the native word size; failing to do so may (but does not necessarily) lead to a failure, usually returned as -EFAULT. liburing will hide these details from users.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/81822c1b4ffbe8ad391b4f9ad1564def0d26d990.1731689588.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent: 93238e6618
Commit: d617b3147d
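To illustrate the registration flow described in the commit message, here is a minimal userspace sketch (not part of this commit). It assumes current uapi headers providing IORING_REGISTER_MEM_REGION, struct io_uring_region_desc, struct io_uring_mem_region_reg, and the IORING_MEM_REGION_TYPE_USER region flag, and it uses raw syscalls rather than liburing; setup_wait_region() is a hypothetical helper name, and the single nr_args argument is an assumption.

/* Hedged sketch: register a user buffer as the wait-argument region.
 * Must be called while the ring is still IORING_SETUP_R_DISABLED. */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static int setup_wait_region(int ring_fd, void *buf, size_t size)
{
	/* buf/size: an mmap()ed, page-aligned buffer owned by userspace */
	struct io_uring_region_desc rd = {
		.user_addr	= (__u64)(unsigned long)buf,
		.size		= size,
		.flags		= IORING_MEM_REGION_TYPE_USER,
	};
	struct io_uring_mem_region_reg mr = {
		.region_uptr	= (__u64)(unsigned long)&rd,
		.flags		= IORING_MEM_REGION_REG_WAIT_ARG,
	};

	/* one registration argument, hence nr_args == 1 (assumption) */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_MEM_REGION, &mr, 1);
}

The ring would be created with IORING_SETUP_R_DISABLED, the region registered as above, and only then enabled via IORING_REGISTER_ENABLE_RINGS, matching the check added to io_register_mem_region() in this patch.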
@@ -324,6 +324,9 @@ struct io_ring_ctx {
 		unsigned		cq_entries;
 		struct io_ev_fd	__rcu	*io_ev_fd;
 		unsigned		cq_extra;
+
+		void			*cq_wait_arg;
+		size_t			cq_wait_size;
 	} ____cacheline_aligned_in_smp;
 
 	/*
@@ -663,6 +663,11 @@ struct io_uring_region_desc {
 	__u64 __resv[4];
 };
 
+enum {
+	/* expose the region as registered wait arguments */
+	IORING_MEM_REGION_REG_WAIT_ARG		= 1,
+};
+
 struct io_uring_mem_region_reg {
 	__u64 region_uptr; /* struct io_uring_region_desc * */
 	__u64 flags;
@@ -3195,7 +3195,19 @@ void __io_uring_cancel(bool cancel_all)
 static struct io_uring_reg_wait *io_get_ext_arg_reg(struct io_ring_ctx *ctx,
 			const struct io_uring_getevents_arg __user *uarg)
 {
+	unsigned long size = sizeof(struct io_uring_reg_wait);
+	unsigned long offset = (uintptr_t)uarg;
+	unsigned long end;
+
+	if (unlikely(offset % sizeof(long)))
+		return ERR_PTR(-EFAULT);
+
+	/* also protects from NULL ->cq_wait_arg as the size would be 0 */
+	if (unlikely(check_add_overflow(offset, size, &end) ||
+		     end > ctx->cq_wait_size))
+		return ERR_PTR(-EFAULT);
+
+	return ctx->cq_wait_arg + offset;
 }
 
 static int io_validate_ext_arg(struct io_ring_ctx *ctx, unsigned flags,
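On the waiter side, io_get_ext_arg_reg() above interprets the value passed as the extended argument as a byte offset into the registered region, checking only word alignment and bounds. A hedged userspace sketch of the matching offset computation follows (not from this commit). It assumes struct io_uring_reg_wait and its IORING_REG_WAIT_TS flag from the earlier fixed-wait series, that the registered form still goes through the extended-argument path (so both IORING_ENTER_EXT_ARG and IORING_ENTER_EXT_ARG_REG are set), and that argsz is still expected to be sizeof(struct io_uring_reg_wait); wait_on_slot() is a hypothetical helper.

#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hedged sketch: treat the registered region as an array of
 * struct io_uring_reg_wait. Slot i lives at byte offset
 * i * sizeof(struct io_uring_reg_wait), which keeps the offset
 * aligned to the native word size as the kernel requires. */
static int wait_on_slot(int ring_fd, void *region, unsigned int slot,
			unsigned int wait_nr)
{
	struct io_uring_reg_wait *rw = (struct io_uring_reg_wait *)region + slot;
	unsigned long offset = slot * sizeof(*rw);

	memset(rw, 0, sizeof(*rw));
	rw->ts.tv_sec = 1;		/* wait for at most one second */
	rw->flags = IORING_REG_WAIT_TS;

	/* with IORING_ENTER_EXT_ARG_REG the "arg" value is the byte
	 * offset into the region; argsz is sizeof(*rw) (assumption) */
	return syscall(__NR_io_uring_enter, ring_fd, 0, wait_nr,
		       IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG |
		       IORING_ENTER_EXT_ARG_REG,
		       (void *)offset, sizeof(*rw));
}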
@@ -588,7 +588,16 @@ static int io_register_mem_region(struct io_ring_ctx *ctx, void __user *uarg)
 
 	if (memchr_inv(&reg.__resv, 0, sizeof(reg.__resv)))
 		return -EINVAL;
-	if (reg.flags)
+	if (reg.flags & ~IORING_MEM_REGION_REG_WAIT_ARG)
+		return -EINVAL;
+
+	/*
+	 * This ensures there are no waiters. Waiters are unlocked and it's
+	 * hard to synchronise with them, especially if we need to initialise
+	 * the region.
+	 */
+	if ((reg.flags & IORING_MEM_REGION_REG_WAIT_ARG) &&
+	    !(ctx->flags & IORING_SETUP_R_DISABLED))
 		return -EINVAL;
 
 	ret = io_create_region(ctx, &ctx->param_region, &rd);
@@ -598,6 +607,11 @@ static int io_register_mem_region(struct io_ring_ctx *ctx, void __user *uarg)
 		io_free_region(ctx, &ctx->param_region);
 		return -EFAULT;
 	}
+
+	if (reg.flags & IORING_MEM_REGION_REG_WAIT_ARG) {
+		ctx->cq_wait_arg = io_region_get_ptr(&ctx->param_region);
+		ctx->cq_wait_size = rd.size;
+	}
 	return 0;
 }
 
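Finally, the IORING_SETUP_R_DISABLED requirement enforced in io_register_mem_region() above is observable from userspace: the same registration attempted on a ring that has already been enabled should be rejected. A tiny hedged check, reusing the hypothetical setup_wait_region() helper from the first sketch:

#include <assert.h>
#include <errno.h>
#include <stddef.h>

/* Hedged sketch: on an enabled ring (no IORING_SETUP_R_DISABLED, or
 * already past IORING_REGISTER_ENABLE_RINGS), registering a region
 * with IORING_MEM_REGION_REG_WAIT_ARG is expected to fail with EINVAL. */
static void check_wait_arg_ordering(int enabled_ring_fd, void *buf, size_t size)
{
	int ret = setup_wait_region(enabled_ring_fd, buf, size);

	assert(ret == -1 && errno == EINVAL);
}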