io_uring/rsrc: add io_rsrc_node_lookup() helper

There are lots of spots open-coding this functionality, add a generic
helper that does the node lookup in a speculation safe way.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Jens Axboe 2024-10-27 09:08:31 -06:00
parent 3597f2786b
commit b54a14041e
12 changed files with 57 additions and 59 deletions

View File

@ -240,10 +240,12 @@ static int __io_sync_cancel(struct io_uring_task *tctx,
/* fixed must be grabbed every time since we drop the uring_lock */ /* fixed must be grabbed every time since we drop the uring_lock */
if ((cd->flags & IORING_ASYNC_CANCEL_FD) && if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
(cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) { (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
if (unlikely(fd >= ctx->file_table.data.nr)) struct io_rsrc_node *node;
node = io_rsrc_node_lookup(&ctx->file_table.data, fd);
if (unlikely(!node))
return -EBADF; return -EBADF;
fd = array_index_nospec(fd, ctx->file_table.data.nr); cd->file = io_slot_file(node);
cd->file = io_file_from_index(&ctx->file_table, fd);
if (!cd->file) if (!cd->file)
return -EBADF; return -EBADF;
} }

View File

@ -58,7 +58,7 @@ static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
u32 slot_index) u32 slot_index)
__must_hold(&req->ctx->uring_lock) __must_hold(&req->ctx->uring_lock)
{ {
struct io_rsrc_node *node; struct io_rsrc_node *node, *old_node;
if (io_is_uring_fops(file)) if (io_is_uring_fops(file))
return -EBADF; return -EBADF;
@ -71,9 +71,9 @@ static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
if (!node) if (!node)
return -ENOMEM; return -ENOMEM;
slot_index = array_index_nospec(slot_index, ctx->file_table.data.nr); old_node = io_rsrc_node_lookup(&ctx->file_table.data, slot_index);
if (ctx->file_table.data.nodes[slot_index]) if (old_node)
io_put_rsrc_node(ctx->file_table.data.nodes[slot_index]); io_put_rsrc_node(old_node);
else else
io_file_bitmap_set(&ctx->file_table, slot_index); io_file_bitmap_set(&ctx->file_table, slot_index);
@ -123,15 +123,17 @@ int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags,
int io_fixed_fd_remove(struct io_ring_ctx *ctx, unsigned int offset) int io_fixed_fd_remove(struct io_ring_ctx *ctx, unsigned int offset)
{ {
struct io_rsrc_node *node;
if (unlikely(!ctx->file_table.data.nr)) if (unlikely(!ctx->file_table.data.nr))
return -ENXIO; return -ENXIO;
if (offset >= ctx->file_table.data.nr) if (offset >= ctx->file_table.data.nr)
return -EINVAL; return -EINVAL;
offset = array_index_nospec(offset, ctx->file_table.data.nr); node = io_rsrc_node_lookup(&ctx->file_table.data, offset);
if (!ctx->file_table.data.nodes[offset]) if (!node)
return -EBADF; return -EBADF;
io_put_rsrc_node(ctx->file_table.data.nodes[offset]); io_put_rsrc_node(node);
ctx->file_table.data.nodes[offset] = NULL; ctx->file_table.data.nodes[offset] = NULL;
io_file_bitmap_clear(&ctx->file_table, offset); io_file_bitmap_clear(&ctx->file_table, offset);
return 0; return 0;

View File

@ -52,7 +52,7 @@ static inline struct file *io_slot_file(struct io_rsrc_node *node)
static inline struct file *io_file_from_index(struct io_file_table *table, static inline struct file *io_file_from_index(struct io_file_table *table,
int index) int index)
{ {
struct io_rsrc_node *node = table->data.nodes[index]; struct io_rsrc_node *node = io_rsrc_node_lookup(&table->data, index);
if (node) if (node)
return io_slot_file(node); return io_slot_file(node);

View File

@ -1879,16 +1879,12 @@ inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
struct file *file = NULL; struct file *file = NULL;
io_ring_submit_lock(ctx, issue_flags); io_ring_submit_lock(ctx, issue_flags);
if (unlikely((unsigned int)fd >= ctx->file_table.data.nr)) node = io_rsrc_node_lookup(&ctx->file_table.data, fd);
goto out;
fd = array_index_nospec(fd, ctx->file_table.data.nr);
node = ctx->file_table.data.nodes[fd];
if (node) { if (node) {
io_req_assign_rsrc_node(req, node); io_req_assign_rsrc_node(req, node);
req->flags |= io_slot_flags(node); req->flags |= io_slot_flags(node);
file = io_slot_file(node); file = io_slot_file(node);
} }
out:
io_ring_submit_unlock(ctx, issue_flags); io_ring_submit_unlock(ctx, issue_flags);
return file; return file;
} }

View File

@ -172,22 +172,24 @@ static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
return __io_msg_ring_data(target_ctx, msg, issue_flags); return __io_msg_ring_data(target_ctx, msg, issue_flags);
} }
static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags) static int io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
{ {
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
struct file *file = NULL; struct io_rsrc_node *node;
int idx = msg->src_fd; int ret = -EBADF;
io_ring_submit_lock(ctx, issue_flags); io_ring_submit_lock(ctx, issue_flags);
if (likely(idx < ctx->file_table.data.nr)) { node = io_rsrc_node_lookup(&ctx->file_table.data, msg->src_fd);
idx = array_index_nospec(idx, ctx->file_table.data.nr); if (node) {
file = io_file_from_index(&ctx->file_table, idx); msg->src_file = io_slot_file(node);
if (file) if (msg->src_file)
get_file(file); get_file(msg->src_file);
req->flags |= REQ_F_NEED_CLEANUP;
ret = 0;
} }
io_ring_submit_unlock(ctx, issue_flags); io_ring_submit_unlock(ctx, issue_flags);
return file; return ret;
} }
static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags) static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags)
@ -256,7 +258,6 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
struct io_ring_ctx *target_ctx = req->file->private_data; struct io_ring_ctx *target_ctx = req->file->private_data;
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
struct file *src_file = msg->src_file;
if (msg->len) if (msg->len)
return -EINVAL; return -EINVAL;
@ -264,12 +265,10 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
return -EINVAL; return -EINVAL;
if (target_ctx->flags & IORING_SETUP_R_DISABLED) if (target_ctx->flags & IORING_SETUP_R_DISABLED)
return -EBADFD; return -EBADFD;
if (!src_file) { if (!msg->src_file) {
src_file = io_msg_grab_file(req, issue_flags); int ret = io_msg_grab_file(req, issue_flags);
if (!src_file) if (unlikely(ret))
return -EBADF; return ret;
msg->src_file = src_file;
req->flags |= REQ_F_NEED_CLEANUP;
} }
if (io_msg_need_remote(target_ctx)) if (io_msg_need_remote(target_ctx))

View File

@ -1343,13 +1343,11 @@ static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
if (sr->flags & IORING_RECVSEND_FIXED_BUF) { if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
struct io_rsrc_node *node; struct io_rsrc_node *node;
int idx;
ret = -EFAULT; ret = -EFAULT;
io_ring_submit_lock(ctx, issue_flags); io_ring_submit_lock(ctx, issue_flags);
if (sr->buf_index < ctx->buf_table.nr) { node = io_rsrc_node_lookup(&ctx->buf_table, sr->buf_index);
idx = array_index_nospec(sr->buf_index, ctx->buf_table.nr); if (node) {
node = ctx->buf_table.nodes[idx];
io_req_assign_rsrc_node(sr->notif, node); io_req_assign_rsrc_node(sr->notif, node);
ret = 0; ret = 0;
} }

View File

@ -62,13 +62,11 @@ int io_nop(struct io_kiocb *req, unsigned int issue_flags)
if (nop->flags & IORING_NOP_FIXED_BUFFER) { if (nop->flags & IORING_NOP_FIXED_BUFFER) {
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
struct io_rsrc_node *node; struct io_rsrc_node *node;
int idx;
ret = -EFAULT; ret = -EFAULT;
io_ring_submit_lock(ctx, issue_flags); io_ring_submit_lock(ctx, issue_flags);
if (nop->buffer < ctx->buf_table.nr) { node = io_rsrc_node_lookup(&ctx->buf_table, nop->buffer);
idx = array_index_nospec(nop->buffer, ctx->buf_table.nr); if (node) {
node = READ_ONCE(ctx->buf_table.nodes[idx]);
io_req_assign_rsrc_node(req, node); io_req_assign_rsrc_node(req, node);
ret = 0; ret = 0;
} }

View File

@ -181,6 +181,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
return -EINVAL; return -EINVAL;
for (done = 0; done < nr_args; done++) { for (done = 0; done < nr_args; done++) {
struct io_rsrc_node *node;
u64 tag = 0; u64 tag = 0;
if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) || if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
@ -195,9 +196,10 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
if (fd == IORING_REGISTER_FILES_SKIP) if (fd == IORING_REGISTER_FILES_SKIP)
continue; continue;
i = array_index_nospec(up->offset + done, ctx->file_table.data.nr); i = up->offset + done;
if (ctx->file_table.data.nodes[i]) { node = io_rsrc_node_lookup(&ctx->file_table.data, i);
io_put_rsrc_node(ctx->file_table.data.nodes[i]); if (node) {
io_put_rsrc_node(node);
ctx->file_table.data.nodes[i] = NULL; ctx->file_table.data.nodes[i] = NULL;
io_file_bitmap_clear(&ctx->file_table, i); io_file_bitmap_clear(&ctx->file_table, i);
} }
@ -958,9 +960,9 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
goto out_unlock; goto out_unlock;
for (i = 0; i < nbufs; i++) { for (i = 0; i < nbufs; i++) {
struct io_rsrc_node *src_node = src_ctx->buf_table.nodes[i]; struct io_rsrc_node *dst_node, *src_node;
struct io_rsrc_node *dst_node;
src_node = io_rsrc_node_lookup(&src_ctx->buf_table, i);
if (src_node == rsrc_empty_node) { if (src_node == rsrc_empty_node) {
dst_node = rsrc_empty_node; dst_node = rsrc_empty_node;
} else { } else {

View File

@ -70,6 +70,14 @@ int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
extern const struct io_rsrc_node empty_node; extern const struct io_rsrc_node empty_node;
#define rsrc_empty_node (struct io_rsrc_node *) &empty_node #define rsrc_empty_node (struct io_rsrc_node *) &empty_node
static inline struct io_rsrc_node *io_rsrc_node_lookup(struct io_rsrc_data *data,
int index)
{
if (index < data->nr)
return data->nodes[array_index_nospec(index, data->nr)];
return NULL;
}
static inline void io_put_rsrc_node(struct io_rsrc_node *node) static inline void io_put_rsrc_node(struct io_rsrc_node *node)
{ {
if (node != rsrc_empty_node && !--node->refs) if (node != rsrc_empty_node && !--node->refs)

View File

@ -332,17 +332,15 @@ static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
struct io_rsrc_node *node; struct io_rsrc_node *node;
struct io_async_rw *io; struct io_async_rw *io;
u16 index;
int ret; int ret;
ret = io_prep_rw(req, sqe, ddir, false); ret = io_prep_rw(req, sqe, ddir, false);
if (unlikely(ret)) if (unlikely(ret))
return ret; return ret;
if (unlikely(req->buf_index >= ctx->buf_table.nr)) node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index);
if (!node)
return -EFAULT; return -EFAULT;
index = array_index_nospec(req->buf_index, ctx->buf_table.nr);
node = ctx->buf_table.nodes[index];
io_req_assign_rsrc_node(req, node); io_req_assign_rsrc_node(req, node);
io = req->async_data; io = req->async_data;

View File

@ -66,17 +66,13 @@ static struct file *io_splice_get_file(struct io_kiocb *req,
return io_file_get_normal(req, sp->splice_fd_in); return io_file_get_normal(req, sp->splice_fd_in);
io_ring_submit_lock(ctx, issue_flags); io_ring_submit_lock(ctx, issue_flags);
if (unlikely(sp->splice_fd_in >= ctx->file_table.data.nr)) node = io_rsrc_node_lookup(&ctx->file_table.data, sp->splice_fd_in);
goto out;
sp->splice_fd_in = array_index_nospec(sp->splice_fd_in, ctx->file_table.data.nr);
node = ctx->file_table.data.nodes[sp->splice_fd_in];
if (node) { if (node) {
node->refs++; node->refs++;
sp->rsrc_node = node; sp->rsrc_node = node;
file = io_slot_file(node); file = io_slot_file(node);
req->flags |= REQ_F_NEED_CLEANUP; req->flags |= REQ_F_NEED_CLEANUP;
} }
out:
io_ring_submit_unlock(ctx, issue_flags); io_ring_submit_unlock(ctx, issue_flags);
return file; return file;
} }

View File

@ -209,18 +209,17 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (ioucmd->flags & IORING_URING_CMD_FIXED) { if (ioucmd->flags & IORING_URING_CMD_FIXED) {
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
u16 index; struct io_rsrc_node *node;
index = READ_ONCE(sqe->buf_index); node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index);
if (unlikely(index >= ctx->buf_table.nr)) if (unlikely(!node))
return -EFAULT; return -EFAULT;
req->buf_index = array_index_nospec(index, ctx->buf_table.nr);
/* /*
* Pin node upfront, prior to io_uring_cmd_import_fixed() * being called. This prevents destruction of the mapped buffer
* being called. This prevents destruction of the mapped buffer * being called. This prevents destruction of the mapped buffer
* we'll need at actual import time. * we'll need at actual import time.
*/ */
io_req_assign_rsrc_node(req, ctx->buf_table.nodes[req->buf_index]); io_req_assign_rsrc_node(req, node);
} }
ioucmd->cmd_op = READ_ONCE(sqe->cmd_op); ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);