Mirror of https://github.com/torvalds/linux.git, synced 2024-11-24 21:21:41 +00:00
io_uring/kbuf: get rid of lower BGID lists
Just rely on the xarray for any kind of bgid. This simplifies things, and
it really doesn't bring us much, if anything.

Cc: stable@vger.kernel.org # v6.4+
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 73eaa2b583
commit 09ab7eff38
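For context, here is a minimal userspace sketch (not part of the commit) of what the change amounts to: before this patch, buffer group IDs below BGID_ARRAY (64) were looked up in a preallocated ctx->io_bl array and only higher bgids went through the ctx->io_bl_xa xarray; afterwards, every bgid goes through the xarray via xa_load()/xa_store(). The bgid_map type and the map_load()/map_store() helpers below are illustrative stand-ins for the kernel xarray, not real io_uring or xarray APIs.

/*
 * Userspace sketch only: one sparse map for every buffer group ID,
 * no fast-path array for "low" bgids. Names here are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

struct buffer_list {
	unsigned int bgid;
	/* buffer bookkeeping would live here in the real structure */
};

/* Toy sparse map standing in for struct xarray + xa_load()/xa_store(). */
struct bgid_map {
	struct buffer_list **slots;
	unsigned int nr_slots;
};

static struct buffer_list *map_load(struct bgid_map *map, unsigned int bgid)
{
	return bgid < map->nr_slots ? map->slots[bgid] : NULL;
}

static int map_store(struct bgid_map *map, unsigned int bgid,
		     struct buffer_list *bl)
{
	if (bgid >= map->nr_slots) {
		unsigned int nr = bgid + 1;
		struct buffer_list **slots;

		slots = realloc(map->slots, nr * sizeof(*slots));
		if (!slots)
			return -1;
		/* zero-fill the newly added slots */
		while (map->nr_slots < nr)
			slots[map->nr_slots++] = NULL;
		map->slots = slots;
	}
	map->slots[bgid] = bl;
	return 0;
}

/* After the patch: a single lookup path for any bgid, no BGID_ARRAY case. */
static struct buffer_list *buffer_get_list(struct bgid_map *map,
					   unsigned int bgid)
{
	return map_load(map, bgid);
}

int main(void)
{
	struct bgid_map map = { 0 };
	struct buffer_list low = { .bgid = 1 }, high = { .bgid = 4096 };

	map_store(&map, low.bgid, &low);
	map_store(&map, high.bgid, &high);

	/* low and high group IDs take the same path */
	printf("bgid 1    -> %p\n", (void *)buffer_get_list(&map, 1));
	printf("bgid 4096 -> %p\n", (void *)buffer_get_list(&map, 4096));
	printf("bgid 7    -> %p\n", (void *)buffer_get_list(&map, 7));

	free(map.slots);
	return 0;
}

Unifying on the sparse map is what lets the patch drop the lazily initialized lower-BGID array, its io_init_bl_list() setup and its separate teardown path, as the hunks below show.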
@@ -294,7 +294,6 @@ struct io_ring_ctx {
 
 		struct io_submit_state		submit_state;
 
-		struct io_buffer_list		*io_bl;
 		struct xarray			io_bl_xa;
 
 		struct io_hash_table		cancel_table_locked;
@@ -351,7 +351,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 err:
 	kfree(ctx->cancel_table.hbs);
 	kfree(ctx->cancel_table_locked.hbs);
-	kfree(ctx->io_bl);
 	xa_destroy(&ctx->io_bl_xa);
 	kfree(ctx);
 	return NULL;
@@ -2932,7 +2931,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	io_napi_free(ctx);
 	kfree(ctx->cancel_table.hbs);
 	kfree(ctx->cancel_table_locked.hbs);
-	kfree(ctx->io_bl);
 	xa_destroy(&ctx->io_bl_xa);
 	kfree(ctx);
 }
@@ -17,8 +17,6 @@
 
 #define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))
 
-#define BGID_ARRAY	64
-
 /* BIDs are addressed by a 16-bit field in a CQE */
 #define MAX_BIDS_PER_BGID (1 << 16)
 
@@ -40,13 +38,9 @@ struct io_buf_free {
 	int				inuse;
 };
 
-static struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
-						   struct io_buffer_list *bl,
-						   unsigned int bgid)
+static inline struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
+							   unsigned int bgid)
 {
-	if (bl && bgid < BGID_ARRAY)
-		return &bl[bgid];
-
 	return xa_load(&ctx->io_bl_xa, bgid);
 }
 
@@ -55,7 +49,7 @@ static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
 {
 	lockdep_assert_held(&ctx->uring_lock);
 
-	return __io_buffer_get_list(ctx, ctx->io_bl, bgid);
+	return __io_buffer_get_list(ctx, bgid);
 }
 
 static int io_buffer_add_list(struct io_ring_ctx *ctx,
@@ -68,10 +62,6 @@ static int io_buffer_add_list(struct io_ring_ctx *ctx,
 	 */
 	bl->bgid = bgid;
 	smp_store_release(&bl->is_ready, 1);
-
-	if (bgid < BGID_ARRAY)
-		return 0;
-
 	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
 }
 
@@ -208,24 +198,6 @@ void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
 	return ret;
 }
 
-static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
-{
-	struct io_buffer_list *bl;
-	int i;
-
-	bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list), GFP_KERNEL);
-	if (!bl)
-		return -ENOMEM;
-
-	for (i = 0; i < BGID_ARRAY; i++) {
-		INIT_LIST_HEAD(&bl[i].buf_list);
-		bl[i].bgid = i;
-	}
-
-	smp_store_release(&ctx->io_bl, bl);
-	return 0;
-}
-
 /*
  * Mark the given mapped range as free for reuse
  */
@@ -300,13 +272,6 @@ void io_destroy_buffers(struct io_ring_ctx *ctx)
 	struct list_head *item, *tmp;
 	struct io_buffer *buf;
 	unsigned long index;
-	int i;
-
-	for (i = 0; i < BGID_ARRAY; i++) {
-		if (!ctx->io_bl)
-			break;
-		__io_remove_buffers(ctx, &ctx->io_bl[i], -1U);
-	}
 
 	xa_for_each(&ctx->io_bl_xa, index, bl) {
 		xa_erase(&ctx->io_bl_xa, bl->bgid);
@@ -489,12 +454,6 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
 
 	io_ring_submit_lock(ctx, issue_flags);
 
-	if (unlikely(p->bgid < BGID_ARRAY && !ctx->io_bl)) {
-		ret = io_init_bl_list(ctx);
-		if (ret)
-			goto err;
-	}
-
 	bl = io_buffer_get_list(ctx, p->bgid);
 	if (unlikely(!bl)) {
 		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
@@ -507,14 +466,9 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
 		if (ret) {
 			/*
 			 * Doesn't need rcu free as it was never visible, but
-			 * let's keep it consistent throughout. Also can't
-			 * be a lower indexed array group, as adding one
-			 * where lookup failed cannot happen.
+			 * let's keep it consistent throughout.
 			 */
-			if (p->bgid >= BGID_ARRAY)
-				kfree_rcu(bl, rcu);
-			else
-				WARN_ON_ONCE(1);
+			kfree_rcu(bl, rcu);
 			goto err;
 		}
 	}
@@ -679,12 +633,6 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
 	if (reg.ring_entries >= 65536)
 		return -EINVAL;
 
-	if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
-		int ret = io_init_bl_list(ctx);
-		if (ret)
-			return ret;
-	}
-
 	bl = io_buffer_get_list(ctx, reg.bgid);
 	if (bl) {
 		/* if mapped buffer ring OR classic exists, don't allow */
@@ -734,10 +682,8 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
 		return -EINVAL;
 
 	__io_remove_buffers(ctx, bl, -1U);
-	if (bl->bgid >= BGID_ARRAY) {
-		xa_erase(&ctx->io_bl_xa, bl->bgid);
-		kfree_rcu(bl, rcu);
-	}
+	xa_erase(&ctx->io_bl_xa, bl->bgid);
+	kfree_rcu(bl, rcu);
 	return 0;
 }
 
@@ -771,7 +717,7 @@ void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
 {
 	struct io_buffer_list *bl;
 
-	bl = __io_buffer_get_list(ctx, smp_load_acquire(&ctx->io_bl), bgid);
+	bl = __io_buffer_get_list(ctx, bgid);
 
 	if (!bl || !bl->is_mmap)
 		return NULL;