2022-06-13 13:07:23 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <linux/file.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/namei.h>
|
|
|
|
#include <linux/poll.h>
|
2024-03-12 16:42:27 +00:00
|
|
|
#include <linux/vmalloc.h>
|
2022-06-13 13:07:23 +00:00
|
|
|
#include <linux/io_uring.h>
|
|
|
|
|
|
|
|
#include <uapi/linux/io_uring.h>
|
|
|
|
|
|
|
|
#include "io_uring.h"
|
|
|
|
#include "opdef.h"
|
|
|
|
#include "kbuf.h"
|
2024-03-27 20:59:09 +00:00
|
|
|
#include "memmap.h"
|
2022-06-13 13:07:23 +00:00
|
|
|
|
2023-10-05 00:05:30 +00:00
|
|
|
/* BIDs are addressed by a 16-bit field in a CQE */
|
|
|
|
#define MAX_BIDS_PER_BGID (1 << 16)
|
|
|
|
|
/*
 * Slab cache for the struct io_buffer metadata objects backing legacy
 * (non-ring) provided buffers; freed in io_destroy_buffers().
 */
struct kmem_cache *io_buf_cachep;
|
|
|
|
|
2022-06-13 13:07:23 +00:00
|
|
|
/*
 * Parsed SQE state for the PROVIDE_BUFFERS / REMOVE_BUFFERS opcodes,
 * stored in the io_kiocb command area (see io_kiocb_to_cmd() users below).
 */
struct io_provide_buf {
	struct file			*file;
	__u64				addr;	/* userspace address of the first buffer */
	__u32				len;	/* length of each buffer */
	__u32				bgid;	/* buffer group ID */
	__u32				nbufs;	/* number of buffers to add/remove */
	__u16				bid;	/* starting buffer ID */
};
|
|
|
|
|
|
|
|
/*
 * Look up the buffer list for a buffer group ID in the ctx xarray.
 * Callers must hold ->uring_lock; the returned pointer is only stable
 * for as long as the lock is held (entries are freed via RCU elsewhere).
 */
static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
							unsigned int bgid)
{
	lockdep_assert_held(&ctx->uring_lock);

	return xa_load(&ctx->io_bl_xa, bgid);
}
|
|
|
|
|
2022-06-22 05:55:51 +00:00
|
|
|
/*
 * Insert a freshly set up buffer list into the ctx xarray under the given
 * group ID. Returns 0 on success or a negative errno from xa_store().
 */
static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
	/*
	 * Store buffer group ID and finally mark the list as visible.
	 * The normal lookup doesn't care about the visibility as we're
	 * always under the ->uring_lock, but the RCU lookup from mmap does.
	 */
	bl->bgid = bgid;
	/* one reference owned by the xarray; dropped in io_put_bl() */
	atomic_set(&bl->refs, 1);
	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}
|
|
|
|
|
2023-11-06 20:39:07 +00:00
|
|
|
/*
 * Return a legacy provided buffer to its group list so it can be selected
 * again, clearing the request's buffer-selected state. Always returns true.
 *
 * NOTE(review): no NULL check on the list lookup — this appears to rely on
 * the invariant that a selected buffer's group still exists while the
 * request holds it; confirm against the selection/removal paths.
 */
bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	struct io_buffer *buf;

	io_ring_submit_lock(ctx, issue_flags);

	buf = req->kbuf;
	bl = io_buffer_get_list(ctx, buf->bgid);
	list_add(&buf->list, &bl->buf_list);
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	/* restore the group ID so a retry can select from the same group */
	req->buf_index = buf->bgid;

	io_ring_submit_unlock(ctx, issue_flags);
	return true;
}
|
|
|
|
|
2024-01-30 03:59:18 +00:00
|
|
|
/*
 * Release a request's legacy provided buffer back to the ctx, choosing the
 * destination list based on which lock the caller holds (see comment below).
 */
void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
{
	/*
	 * We can add this buffer back to two lists:
	 *
	 * 1) The io_buffers_cache list. This one is protected by the
	 *    ctx->uring_lock. If we already hold this lock, add back to this
	 *    list as we can grab it from issue as well.
	 * 2) The io_buffers_comp list. This one is protected by the
	 *    ctx->completion_lock.
	 *
	 * We migrate buffers from the comp_list to the issue cache list
	 * when we need one.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock(&ctx->completion_lock);
		__io_put_kbuf_list(req, &ctx->io_buffers_comp);
		spin_unlock(&ctx->completion_lock);
	} else {
		lockdep_assert_held(&req->ctx->uring_lock);

		__io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
	}
}
|
|
|
|
|
2022-06-13 13:07:23 +00:00
|
|
|
/*
 * Pick the first legacy provided buffer from the group list, marking the
 * request as having a selected buffer. *len is clamped to the buffer's
 * size (or set to it, when the caller passed 0 meaning "any length").
 * Returns the userspace address of the buffer, or NULL if the group is
 * empty. Caller must hold ->uring_lock.
 */
static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
					      struct io_buffer_list *bl)
{
	if (!list_empty(&bl->buf_list)) {
		struct io_buffer *kbuf;

		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&kbuf->list);
		if (*len == 0 || *len > kbuf->len)
			*len = kbuf->len;
		/* flag that we handed out the last buffer in the group */
		if (list_empty(&bl->buf_list))
			req->flags |= REQ_F_BL_EMPTY;
		req->flags |= REQ_F_BUFFER_SELECTED;
		req->kbuf = kbuf;
		req->buf_index = kbuf->bid;
		return u64_to_user_ptr(kbuf->addr);
	}
	return NULL;
}
|
|
|
|
|
2024-03-05 14:31:52 +00:00
|
|
|
/*
 * Select a single legacy provided buffer and describe it in iov[0].
 * Returns 0 on success, -ENOBUFS if the group has no buffers left.
 */
static int io_provided_buffers_select(struct io_kiocb *req, size_t *len,
				      struct io_buffer_list *bl,
				      struct iovec *iov)
{
	void __user *addr = io_provided_buffer_select(req, len, bl);

	if (unlikely(!addr))
		return -ENOBUFS;

	iov->iov_base = addr;
	iov->iov_len = *len;
	return 0;
}
|
|
|
|
|
|
|
|
/* Map a ring head index to its buffer descriptor (mask wraps the ring). */
static struct io_uring_buf *io_ring_head_to_buf(struct io_uring_buf_ring *br,
						__u16 head, __u16 mask)
{
	__u16 idx = head & mask;

	return br->bufs + idx;
}
|
|
|
|
|
2022-06-13 13:07:23 +00:00
|
|
|
/*
 * Select the next buffer from a provided buffer ring. The tail is read with
 * acquire ordering to pair with the application's store-release publishing
 * new buffers. On success the request is marked REQ_F_BUFFER_RING and,
 * unless consumed immediately (see below), REQ_F_BUFFERS_COMMIT so the
 * head advance happens at commit time. Returns the userspace address, or
 * NULL if the ring is empty.
 */
static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
					  struct io_buffer_list *bl,
					  unsigned int issue_flags)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	__u16 tail, head = bl->head;
	struct io_uring_buf *buf;

	tail = smp_load_acquire(&br->tail);
	if (unlikely(tail == head))
		return NULL;

	/* taking the last entry — tell the issuer the ring is now empty */
	if (head + 1 == tail)
		req->flags |= REQ_F_BL_EMPTY;

	buf = io_ring_head_to_buf(br, head, bl->mask);
	if (*len == 0 || *len > buf->len)
		*len = buf->len;
	req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
	req->buf_list = bl;
	req->buf_index = buf->bid;

	if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
		/*
		 * If we came in unlocked, we have no choice but to consume the
		 * buffer here, otherwise nothing ensures that the buffer won't
		 * get used by others. This does mean it'll be pinned until the
		 * IO completes, coming in unlocked means we're being called from
		 * io-wq context and there may be further retries in async hybrid
		 * mode. For the locked case, the caller must call commit when
		 * the transfer completes (or if we get -EAGAIN and must poll of
		 * retry).
		 */
		req->flags &= ~REQ_F_BUFFERS_COMMIT;
		req->buf_list = NULL;
		bl->head++;
	}
	return u64_to_user_ptr(buf->addr);
}
|
|
|
|
|
|
|
|
/*
 * Select a buffer for the request from its buffer group (req->buf_index),
 * dispatching to the ring or legacy path based on the group type.
 * Returns the userspace buffer address, or NULL if no buffer is available
 * (including when the group doesn't exist).
 */
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	void __user *ret = NULL;

	io_ring_submit_lock(req->ctx, issue_flags);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (likely(bl)) {
		if (bl->is_buf_ring)
			ret = io_ring_buffer_select(req, len, bl, issue_flags);
		else
			ret = io_provided_buffer_select(req, len, bl);
	}
	io_ring_submit_unlock(req->ctx, issue_flags);
	return ret;
}
|
|
|
|
|
2024-03-05 14:31:52 +00:00
|
|
|
/* cap it at a reasonable 256, will be one page even for 4K */
|
|
|
|
#define PEEK_MAX_IMPORT 256
|
|
|
|
|
|
|
|
/*
 * Peek multiple buffers from a buffer ring into arg->iovs without advancing
 * the ring head (the caller commits separately). May grow arg->iovs via
 * kmalloc_array() when KBUF_MODE_EXPAND is set and more buffers are needed
 * than the caller supplied. Returns the number of iovecs filled, -ENOBUFS
 * if the ring is empty, or -ENOMEM on allocation failure.
 */
static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
				struct io_buffer_list *bl)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	struct iovec *iov = arg->iovs;
	int nr_iovs = arg->nr_iovs;
	__u16 nr_avail, tail, head;
	struct io_uring_buf *buf;

	/* acquire pairs with the app's store-release of new tail entries */
	tail = smp_load_acquire(&br->tail);
	head = bl->head;
	nr_avail = min_t(__u16, tail - head, UIO_MAXIOV);
	if (unlikely(!nr_avail))
		return -ENOBUFS;

	buf = io_ring_head_to_buf(br, head, bl->mask);
	if (arg->max_len) {
		int needed;

		/* estimate buffers needed assuming each is buf->len bytes */
		needed = (arg->max_len + buf->len - 1) / buf->len;
		needed = min(needed, PEEK_MAX_IMPORT);
		if (nr_avail > needed)
			nr_avail = needed;
	}

	/*
	 * only alloc a bigger array if we know we have data to map, eg not
	 * a speculative peek operation.
	 */
	if (arg->mode & KBUF_MODE_EXPAND && nr_avail > nr_iovs && arg->max_len) {
		iov = kmalloc_array(nr_avail, sizeof(struct iovec), GFP_KERNEL);
		if (unlikely(!iov))
			return -ENOMEM;
		if (arg->mode & KBUF_MODE_FREE)
			kfree(arg->iovs);
		arg->iovs = iov;
		nr_iovs = nr_avail;
	} else if (nr_avail < nr_iovs) {
		nr_iovs = nr_avail;
	}

	/* set it to max, if not set, so we can use it unconditionally */
	if (!arg->max_len)
		arg->max_len = INT_MAX;

	/* report the first buffer's ID back via the request */
	req->buf_index = buf->bid;
	do {
		/* truncate end piece, if needed */
		if (buf->len > arg->max_len)
			buf->len = arg->max_len;

		iov->iov_base = u64_to_user_ptr(buf->addr);
		iov->iov_len = buf->len;
		iov++;

		arg->out_len += buf->len;
		arg->max_len -= buf->len;
		if (!arg->max_len)
			break;

		buf = io_ring_head_to_buf(br, ++head, bl->mask);
	} while (--nr_iovs);

	if (head == tail)
		req->flags |= REQ_F_BL_EMPTY;

	req->flags |= REQ_F_BUFFER_RING;
	req->buf_list = bl;
	return iov - arg->iovs;
}
|
|
|
|
|
|
|
|
/*
 * Select one or more buffers for the request. Ring-provided groups may
 * yield multiple buffers (committed immediately, see below); legacy groups
 * yield at most one. Returns the number of iovecs filled or a negative
 * errno (-ENOENT if the group doesn't exist).
 */
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = -ENOENT;

	io_ring_submit_lock(ctx, issue_flags);
	bl = io_buffer_get_list(ctx, req->buf_index);
	if (unlikely(!bl))
		goto out_unlock;

	if (bl->is_buf_ring) {
		ret = io_ring_buffers_peek(req, arg, bl);
		/*
		 * Don't recycle these buffers if we need to go through poll.
		 * Nobody else can use them anyway, and holding on to provided
		 * buffers for a send/write operation would happen on the app
		 * side anyway with normal buffers. Besides, we already
		 * committed them, they cannot be put back in the queue.
		 */
		if (ret > 0) {
			req->flags |= REQ_F_BL_NO_RECYCLE;
			req->buf_list->head += ret;
		}
	} else {
		ret = io_provided_buffers_select(req, &arg->out_len, bl, arg->iovs);
	}
out_unlock:
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Like io_buffers_select(), but for callers already holding ->uring_lock:
 * ring buffers are only peeked (marked REQ_F_BUFFERS_COMMIT for a later
 * commit) rather than consumed. Returns iovec count or negative errno.
 */
int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (unlikely(!bl))
		return -ENOENT;

	if (bl->is_buf_ring) {
		ret = io_ring_buffers_peek(req, arg, bl);
		/* defer the head advance until the caller commits */
		if (ret > 0)
			req->flags |= REQ_F_BUFFERS_COMMIT;
		return ret;
	}

	/* don't support multiple buffer selections for legacy */
	return io_provided_buffers_select(req, &arg->max_len, bl, arg->iovs);
}
|
|
|
|
|
2022-06-13 13:07:23 +00:00
|
|
|
/*
 * Remove up to nbufs buffers from a buffer list. For a ring-mapped group
 * the whole ring is torn down (pages unpinned/unmapped) regardless of
 * nbufs; for a legacy group, buffers are moved back to the ctx cache one
 * at a time. Returns the number of buffers removed.
 */
static int __io_remove_buffers(struct io_ring_ctx *ctx,
			       struct io_buffer_list *bl, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	if (bl->is_buf_ring) {
		/* count of entries the app had published but we hadn't consumed */
		i = bl->buf_ring->tail - bl->head;
		if (bl->buf_nr_pages) {
			int j;

			/* user-mapped rings were never pinned, skip unpin for those */
			if (!bl->is_mmap) {
				for (j = 0; j < bl->buf_nr_pages; j++)
					unpin_user_page(bl->buf_pages[j]);
			}
			io_pages_unmap(bl->buf_ring, &bl->buf_pages,
					&bl->buf_nr_pages, bl->is_mmap);
			bl->is_mmap = 0;
		}
		/* make sure it's seen as empty */
		INIT_LIST_HEAD(&bl->buf_list);
		bl->is_buf_ring = 0;
		return i;
	}

	/* protects io_buffers_cache */
	lockdep_assert_held(&ctx->uring_lock);

	while (!list_empty(&bl->buf_list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_move(&nxt->list, &ctx->io_buffers_cache);
		if (++i == nbufs)
			return i;
		cond_resched();
	}

	return i;
}
|
|
|
|
|
2024-04-02 22:16:03 +00:00
|
|
|
void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
|
2024-03-15 22:12:51 +00:00
|
|
|
{
|
|
|
|
if (atomic_dec_and_test(&bl->refs)) {
|
|
|
|
__io_remove_buffers(ctx, bl, -1U);
|
|
|
|
kfree_rcu(bl, rcu);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-06-13 13:07:23 +00:00
|
|
|
/*
 * Tear down all provided-buffer state at ring exit: drop every buffer
 * group, then free all cached legacy io_buffer objects back to the slab.
 */
void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;
	struct list_head *item, *tmp;
	struct io_buffer *buf;
	unsigned long index;

	xa_for_each(&ctx->io_bl_xa, index, bl) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		io_put_bl(ctx, bl);
	}

	/*
	 * Move deferred locked entries to cache before pruning
	 */
	spin_lock(&ctx->completion_lock);
	if (!list_empty(&ctx->io_buffers_comp))
		list_splice_init(&ctx->io_buffers_comp, &ctx->io_buffers_cache);
	spin_unlock(&ctx->completion_lock);

	list_for_each_safe(item, tmp, &ctx->io_buffers_cache) {
		buf = list_entry(item, struct io_buffer, list);
		kmem_cache_free(io_buf_cachep, buf);
	}
}
|
|
|
|
|
|
|
|
/*
 * Prep handler for IORING_OP_REMOVE_BUFFERS: validate the SQE and stash
 * the buffer count and group ID in the command area.
 */
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	/* these SQE fields are unused by this opcode and must be zero */
	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
	    sqe->splice_fd_in)
		return -EINVAL;

	/* ->fd is repurposed to carry the number of buffers to remove */
	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Issue handler for IORING_OP_REMOVE_BUFFERS. The result is the number of
 * buffers removed, -ENOENT if the group doesn't exist, or -EINVAL for a
 * ring-mapped group (those are unregistered, not removed piecemeal).
 */
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	ret = -ENOENT;
	bl = io_buffer_get_list(ctx, p->bgid);
	if (bl) {
		ret = -EINVAL;
		/* can't use provide/remove buffers command on mapped buffers */
		if (!bl->is_buf_ring)
			ret = __io_remove_buffers(ctx, bl, p->nbufs);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
|
|
|
|
|
|
|
|
/*
 * Prep handler for IORING_OP_PROVIDE_BUFFERS: validate the SQE, check for
 * arithmetic overflow of addr + len * nbufs, verify userspace access, and
 * stash the parsed parameters in the command area.
 */
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	unsigned long size, tmp_check;
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	/* ->fd is repurposed to carry the number of buffers to add */
	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
				&size))
		return -EOVERFLOW;
	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
		return -EOVERFLOW;

	/* product already validated overflow-free by check_mul_overflow() above */
	size = (unsigned long)p->len * p->nbufs;
	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	/* ->off carries the starting buffer ID */
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	/* all resulting BIDs must fit in the 16-bit CQE field */
	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
		return -EINVAL;
	p->bid = tmp;
	return 0;
}
|
|
|
|
|
io_uring/kbuf: Use slab for struct io_buffer objects
The allocation of struct io_buffer for metadata of provided buffers is
done through a custom allocator that directly gets pages and
fragments them. But, slab would do just fine, as this is not a hot path
(in fact, it is a deprecated feature) and, by keeping a custom allocator
implementation we lose benefits like tracking, poisoning,
sanitizers. Finally, the custom code is more complex and requires
keeping the list of pages in struct ctx for no good reason. This patch
cleans this path up and just uses slab.
I microbenchmarked it by forcing the allocation of a large number of
objects with the least number of io_uring commands possible (keeping
nbufs=USHRT_MAX), with and without the patch. There is a slight
increase in time spent in the allocation with slab, of course, but even
when allocating to system resources exhaustion, which is not very
realistic and happened around 1/2 billion provided buffers for me, it
wasn't a significant hit in system time. Specially if we think of a
real-world scenario, an application doing register/unregister of
provided buffers will hit ctx->io_buffers_cache more often than actually
going to slab.
Signed-off-by: Gabriel Krisman Bertazi <krisman@suse.de>
Link: https://lore.kernel.org/r/20231005000531.30800-4-krisman@suse.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2023-10-05 00:05:31 +00:00
|
|
|
#define IO_BUFFER_ALLOC_BATCH 64
|
|
|
|
|
2022-06-13 13:07:23 +00:00
|
|
|
/*
 * Refill ctx->io_buffers_cache with free struct io_buffer entries.
 *
 * First tries to reclaim entries that completed out-of-line (parked on
 * ctx->io_buffers_comp under ->completion_lock); only if none are
 * available does it fall back to allocating a fresh batch from slab.
 *
 * Returns 0 on success, -ENOMEM if no entry could be obtained.
 * Must be called with the ring submission lock held (it links entries
 * onto ctx->io_buffers_cache without further locking).
 */
static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
{
	struct io_buffer *bufs[IO_BUFFER_ALLOC_BATCH];
	int allocated;

	/*
	 * Completions that don't happen inline (eg not under uring_lock) will
	 * add to ->io_buffers_comp. If we don't have any free buffers, check
	 * the completion list and splice those entries first.
	 */
	if (!list_empty_careful(&ctx->io_buffers_comp)) {
		spin_lock(&ctx->completion_lock);
		if (!list_empty(&ctx->io_buffers_comp)) {
			list_splice_init(&ctx->io_buffers_comp,
						&ctx->io_buffers_cache);
			spin_unlock(&ctx->completion_lock);
			return 0;
		}
		spin_unlock(&ctx->completion_lock);
	}

	/*
	 * No free buffers and no completion entries either. Allocate a new
	 * batch of buffer entries and add those to our freelist.
	 */
	allocated = kmem_cache_alloc_bulk(io_buf_cachep, GFP_KERNEL_ACCOUNT,
					  ARRAY_SIZE(bufs), (void **) bufs);
	if (unlikely(!allocated)) {
		/*
		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
		 * retry single alloc to be on the safe side.
		 */
		bufs[0] = kmem_cache_alloc(io_buf_cachep, GFP_KERNEL);
		if (!bufs[0])
			return -ENOMEM;
		allocated = 1;
	}

	/* Move every allocated entry onto the free cache list. */
	while (allocated)
		list_add_tail(&bufs[--allocated]->list, &ctx->io_buffers_cache);

	return 0;
}
|
|
|
|
|
|
|
|
static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
|
|
|
|
struct io_buffer_list *bl)
|
|
|
|
{
|
|
|
|
struct io_buffer *buf;
|
|
|
|
u64 addr = pbuf->addr;
|
|
|
|
int i, bid = pbuf->bid;
|
|
|
|
|
|
|
|
for (i = 0; i < pbuf->nbufs; i++) {
|
|
|
|
if (list_empty(&ctx->io_buffers_cache) &&
|
|
|
|
io_refill_buffer_cache(ctx))
|
|
|
|
break;
|
|
|
|
buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
|
|
|
|
list);
|
|
|
|
list_move_tail(&buf->list, &bl->buf_list);
|
|
|
|
buf->addr = addr;
|
|
|
|
buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
|
|
|
|
buf->bid = bid;
|
|
|
|
buf->bgid = pbuf->bgid;
|
|
|
|
addr += pbuf->len;
|
|
|
|
bid++;
|
|
|
|
cond_resched();
|
|
|
|
}
|
|
|
|
|
|
|
|
return i ? 0 : -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Handle IORING_OP_PROVIDE_BUFFERS: add classic provided buffers to the
 * buffer group identified by p->bgid, creating the group's list on
 * first use.
 *
 * Runs under the ring submission lock. The request result is the
 * io_add_buffers() return value (0 or negative errno); always returns
 * IOU_OK to the core so the CQE is posted inline.
 */
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	bl = io_buffer_get_list(ctx, p->bgid);
	if (unlikely(!bl)) {
		/* first buffer for this group: create the list lazily */
		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
		if (!bl) {
			ret = -ENOMEM;
			goto err;
		}
		INIT_LIST_HEAD(&bl->buf_list);
		ret = io_buffer_add_list(ctx, bl, p->bgid);
		if (ret) {
			/*
			 * Doesn't need rcu free as it was never visible, but
			 * let's keep it consistent throughout.
			 */
			kfree_rcu(bl, rcu);
			goto err;
		}
	}
	/* can't add buffers via this command for a mapped buffer ring */
	if (bl->is_buf_ring) {
		ret = -EINVAL;
		goto err;
	}

	ret = io_add_buffers(ctx, p, bl);
err:
	io_ring_submit_unlock(ctx, issue_flags);

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
|
|
|
|
|
2023-03-14 16:55:50 +00:00
|
|
|
/*
 * Register a user-allocated provided buffer ring: pin the userspace
 * pages backing reg->ring_addr and vmap() them into a kernel-contiguous
 * mapping stored in bl->buf_ring.
 *
 * On success, @bl owns the pinned page array and the vmap'ed mapping
 * (is_buf_ring set, is_mmap clear). On failure, all pins/mappings are
 * undone and a negative errno is returned.
 */
static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
			    struct io_buffer_list *bl)
{
	struct io_uring_buf_ring *br = NULL;
	struct page **pages;
	int nr_pages, ret;

	/* pin enough pages to cover ring_entries ring slots */
	pages = io_pin_pages(reg->ring_addr,
			     flex_array_size(br, bufs, reg->ring_entries),
			     &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	br = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!br) {
		ret = -ENOMEM;
		goto error_unpin;
	}

#ifdef SHM_COLOUR
	/*
	 * On platforms that have specific aliasing requirements, SHM_COLOUR
	 * is set and we must guarantee that the kernel and user side align
	 * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
	 * the application mmap's the provided ring buffer. Fail the request
	 * if we, by chance, don't end up with aligned addresses. The app
	 * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
	 * this transparently.
	 */
	if ((reg->ring_addr | (unsigned long) br) & (SHM_COLOUR - 1)) {
		ret = -EINVAL;
		goto error_unpin;
	}
#endif
	bl->buf_pages = pages;
	bl->buf_nr_pages = nr_pages;
	bl->buf_ring = br;
	bl->is_buf_ring = 1;
	bl->is_mmap = 0;
	return 0;
error_unpin:
	/* br may still be NULL here (vmap failure); vunmap(NULL) is a no-op */
	unpin_user_pages(pages, nr_pages);
	kvfree(pages);
	vunmap(br);
	return ret;
}
|
|
|
|
|
2023-11-27 23:47:04 +00:00
|
|
|
static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
|
|
|
|
struct io_uring_buf_reg *reg,
|
2023-03-14 17:07:19 +00:00
|
|
|
struct io_buffer_list *bl)
|
|
|
|
{
|
|
|
|
size_t ring_size;
|
|
|
|
|
|
|
|
ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring);
|
|
|
|
|
2024-03-13 02:24:21 +00:00
|
|
|
bl->buf_ring = io_pages_map(&bl->buf_pages, &bl->buf_nr_pages, ring_size);
|
2024-07-18 19:00:53 +00:00
|
|
|
if (IS_ERR(bl->buf_ring)) {
|
|
|
|
bl->buf_ring = NULL;
|
2024-03-13 02:24:21 +00:00
|
|
|
return -ENOMEM;
|
2024-07-18 19:00:53 +00:00
|
|
|
}
|
2024-03-13 02:24:21 +00:00
|
|
|
|
2024-03-13 15:52:41 +00:00
|
|
|
bl->is_buf_ring = 1;
|
2023-03-14 17:07:19 +00:00
|
|
|
bl->is_mmap = 1;
|
2023-03-14 16:55:50 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * IORING_REGISTER_PBUF_RING: register a provided buffer ring for a
 * buffer group. The ring is either user-allocated (pages pinned via
 * io_pin_pbuf_ring()) or kernel-allocated for later mmap
 * (IOU_PBUF_RING_MMAP, via io_alloc_pbuf_ring()).
 *
 * Returns 0 on success or a negative errno. Called with ->uring_lock
 * held (asserted below).
 */
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl, *free_bl = NULL;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;

	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags & ~IOU_PBUF_RING_MMAP)
		return -EINVAL;
	if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
		/* user supplies the ring memory: must be page aligned */
		if (!reg.ring_addr)
			return -EFAULT;
		if (reg.ring_addr & ~PAGE_MASK)
			return -EINVAL;
	} else {
		/* kernel allocates the ring: no address may be given */
		if (reg.ring_addr)
			return -EINVAL;
	}

	if (!is_power_of_2(reg.ring_entries))
		return -EINVAL;

	/* cannot disambiguate full vs empty due to head/tail size */
	if (reg.ring_entries >= MAX_BIDS_PER_BGID)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (bl) {
		/* if mapped buffer ring OR classic exists, don't allow */
		if (bl->is_buf_ring || !list_empty(&bl->buf_list))
			return -EEXIST;
	} else {
		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
		if (!bl)
			return -ENOMEM;
	}

	if (!(reg.flags & IOU_PBUF_RING_MMAP))
		ret = io_pin_pbuf_ring(&reg, bl);
	else
		ret = io_alloc_pbuf_ring(ctx, &reg, bl);

	if (!ret) {
		bl->nr_entries = reg.ring_entries;
		bl->mask = reg.ring_entries - 1;

		io_buffer_add_list(ctx, bl, reg.bgid);
		return 0;
	}

	/* only frees a list we allocated above; NULL-safe */
	kfree_rcu(free_bl, rcu);
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * IORING_UNREGISTER_PBUF_RING: tear down a previously registered
 * provided buffer ring for reg.bgid.
 *
 * Erases the list from the xarray first so no new lookups can find it,
 * then drops the reference via io_put_bl(), which frees the backing
 * resources once the last reference is gone. Called with ->uring_lock
 * held (asserted below). Returns 0 or a negative errno.
 */
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (!bl)
		return -ENOENT;
	/* only rings registered via PBUF_RING can be unregistered here */
	if (!bl->is_buf_ring)
		return -EINVAL;

	xa_erase(&ctx->io_bl_xa, bl->bgid);
	io_put_bl(ctx, bl);
	return 0;
}
|
2023-03-14 17:07:19 +00:00
|
|
|
|
2023-12-21 16:02:57 +00:00
|
|
|
/*
 * IORING_REGISTER_PBUF_STATUS: report the current head of a registered
 * provided buffer ring back to userspace in buf_status.head.
 *
 * Returns 0 on success, -EFAULT on copy failure, -EINVAL on bad input
 * or non-ring group, -ENOENT if the buffer group doesn't exist.
 */
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_status buf_status;
	struct io_buffer_list *bl;
	int idx;

	if (copy_from_user(&buf_status, arg, sizeof(buf_status)))
		return -EFAULT;

	/* all reserved fields must be zero */
	for (idx = 0; idx < ARRAY_SIZE(buf_status.resv); idx++) {
		if (buf_status.resv[idx])
			return -EINVAL;
	}

	bl = io_buffer_get_list(ctx, buf_status.buf_group);
	if (!bl)
		return -ENOENT;
	if (!bl->is_buf_ring)
		return -EINVAL;

	/* copy the whole struct back with head filled in */
	buf_status.head = bl->head;
	if (copy_to_user(arg, &buf_status, sizeof(buf_status)))
		return -EFAULT;

	return 0;
}
|
|
|
|
|
2024-04-02 22:16:03 +00:00
|
|
|
struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
|
|
|
|
unsigned long bgid)
|
2023-03-14 17:07:19 +00:00
|
|
|
{
|
|
|
|
struct io_buffer_list *bl;
|
2024-04-02 22:16:03 +00:00
|
|
|
bool ret;
|
2023-03-14 17:07:19 +00:00
|
|
|
|
2024-04-02 22:16:03 +00:00
|
|
|
/*
|
|
|
|
* We have to be a bit careful here - we're inside mmap and cannot grab
|
|
|
|
* the uring_lock. This means the buffer_list could be simultaneously
|
|
|
|
* going away, if someone is trying to be sneaky. Look it up under rcu
|
|
|
|
* so we know it's not going away, and attempt to grab a reference to
|
|
|
|
* it. If the ref is already zero, then fail the mapping. If successful,
|
|
|
|
* the caller will call io_put_bl() to drop the the reference at at the
|
|
|
|
* end. This may then safely free the buffer_list (and drop the pages)
|
|
|
|
* at that point, vm_insert_pages() would've already grabbed the
|
|
|
|
* necessary vma references.
|
|
|
|
*/
|
|
|
|
rcu_read_lock();
|
|
|
|
bl = xa_load(&ctx->io_bl_xa, bgid);
|
|
|
|
/* must be a mmap'able buffer ring and have pages */
|
|
|
|
ret = false;
|
|
|
|
if (bl && bl->is_mmap)
|
|
|
|
ret = atomic_inc_not_zero(&bl->refs);
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
|
|
|
if (ret)
|
|
|
|
return bl;
|
|
|
|
|
|
|
|
return ERR_PTR(-EINVAL);
|
2023-03-14 17:07:19 +00:00
|
|
|
}
|
2023-11-27 23:47:04 +00:00
|
|
|
|
2024-03-13 02:24:21 +00:00
|
|
|
int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma)
|
2023-11-27 23:47:04 +00:00
|
|
|
{
|
2024-03-13 02:24:21 +00:00
|
|
|
struct io_ring_ctx *ctx = file->private_data;
|
|
|
|
loff_t pgoff = vma->vm_pgoff << PAGE_SHIFT;
|
|
|
|
struct io_buffer_list *bl;
|
|
|
|
int bgid, ret;
|
2023-11-27 23:47:04 +00:00
|
|
|
|
2024-03-13 02:24:21 +00:00
|
|
|
bgid = (pgoff & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
|
|
|
|
bl = io_pbuf_get_bl(ctx, bgid);
|
|
|
|
if (IS_ERR(bl))
|
|
|
|
return PTR_ERR(bl);
|
|
|
|
|
|
|
|
ret = io_uring_mmap_pages(ctx, vma, bl->buf_pages, bl->buf_nr_pages);
|
|
|
|
io_put_bl(ctx, bl);
|
|
|
|
return ret;
|
2023-11-27 23:47:04 +00:00
|
|
|
}
|