mirror of https://github.com/torvalds/linux.git
io_uring/kbuf: cleanup passing back cflags
We have various functions calculating the CQE cflags we need to pass
back, but it's all the same everywhere. Make a number of the putting
functions void, and just have the two main helpers for this,
io_put_kbuf() and io_put_kbuf_comp(), calculate the actual mask and
pass it back.

While at it, clean up how we put REQ_F_BUFFER_RING buffers. Before this
change, we would call into __io_put_kbuf() only to go right back into
the header-defined functions. As clearing this type of buffer is just
re-assigning the buf_index and incrementing the head, this is very
wasteful.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent 949249e25f
commit 8435c6f380
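For reference (not part of the commit itself), the mask these helpers now compute is exactly what userspace later sees in cqe->flags. A minimal decode sketch, using only the UAPI definitions from <linux/io_uring.h>; cqe_buffer_id() is a hypothetical helper name:

#include <stdbool.h>
#include <linux/io_uring.h>

/* Decode the cflags mask io_put_kbuf()/io_put_kbuf_comp() produce:
 * IORING_CQE_F_BUFFER | (buf_index << IORING_CQE_BUFFER_SHIFT).
 * Returns true and sets *bid if this completion consumed a buffer. */
static bool cqe_buffer_id(const struct io_uring_cqe *cqe, unsigned int *bid)
{
	if (!(cqe->flags & IORING_CQE_F_BUFFER))
		return false;
	*bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
	return true;
}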
io_uring/kbuf.c
@@ -102,10 +102,8 @@ bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 	return true;
 }
 
-unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
+void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
 {
-	unsigned int cflags;
-
 	/*
 	 * We can add this buffer back to two lists:
 	 *
@@ -118,21 +116,17 @@ unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
 	 * We migrate buffers from the comp_list to the issue cache list
 	 * when we need one.
 	 */
-	if (req->flags & REQ_F_BUFFER_RING) {
-		/* no buffers to recycle for this case */
-		cflags = __io_put_kbuf_list(req, NULL);
-	} else if (issue_flags & IO_URING_F_UNLOCKED) {
+	if (issue_flags & IO_URING_F_UNLOCKED) {
 		struct io_ring_ctx *ctx = req->ctx;
 
 		spin_lock(&ctx->completion_lock);
-		cflags = __io_put_kbuf_list(req, &ctx->io_buffers_comp);
+		__io_put_kbuf_list(req, &ctx->io_buffers_comp);
 		spin_unlock(&ctx->completion_lock);
 	} else {
 		lockdep_assert_held(&req->ctx->uring_lock);
 
-		cflags = __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
+		__io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
 	}
-	return cflags;
 }
 
 static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
io_uring/kbuf.h
@@ -57,7 +57,7 @@ int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);
 
 void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx);
 
-unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
+void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
 
 bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
 
@@ -108,41 +108,54 @@ static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 	return false;
 }
 
-static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
-					      struct list_head *list)
+static inline void __io_put_kbuf_ring(struct io_kiocb *req)
 {
-	unsigned int ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
+	if (req->buf_list) {
+		req->buf_index = req->buf_list->bgid;
+		req->buf_list->head++;
+	}
+	req->flags &= ~REQ_F_BUFFER_RING;
+}
 
+static inline void __io_put_kbuf_list(struct io_kiocb *req,
+				      struct list_head *list)
+{
 	if (req->flags & REQ_F_BUFFER_RING) {
-		if (req->buf_list) {
-			req->buf_index = req->buf_list->bgid;
-			req->buf_list->head++;
-		}
-		req->flags &= ~REQ_F_BUFFER_RING;
+		__io_put_kbuf_ring(req);
 	} else {
 		req->buf_index = req->kbuf->bgid;
 		list_add(&req->kbuf->list, list);
 		req->flags &= ~REQ_F_BUFFER_SELECTED;
 	}
-
-	return ret;
 }
 
 static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
 {
+	unsigned int ret;
+
 	lockdep_assert_held(&req->ctx->completion_lock);
 
 	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
 		return 0;
-	return __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
+
+	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
+	__io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
+	return ret;
 }
 
 static inline unsigned int io_put_kbuf(struct io_kiocb *req,
 				       unsigned issue_flags)
 {
-	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
+	unsigned int ret;
+
+	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
 		return 0;
-	return __io_put_kbuf(req, issue_flags);
+
+	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
+	if (req->flags & REQ_F_BUFFER_RING)
+		__io_put_kbuf_ring(req);
+	else
+		__io_put_kbuf(req, issue_flags);
+	return ret;
 }
 #endif
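The diff above also shows why putting a REQ_F_BUFFER_RING buffer is so cheap: __io_put_kbuf_ring() only restores buf_index and bumps the ring head. The matching userspace half of that protocol is advancing the buffer ring tail to hand buffers back. A hedged liburing sketch, assuming a provided-buffer ring was already registered; replenish() is a hypothetical helper:

#include <liburing.h>

/* Give one buffer back to a registered provided-buffer ring after
 * handling the CQE that consumed it. 'mask' comes from
 * io_uring_buf_ring_mask(nr_entries). */
static void replenish(struct io_uring_buf_ring *br, void *addr,
		      unsigned int len, unsigned short bid, int mask)
{
	io_uring_buf_ring_add(br, addr, len, bid, mask, 0);
	io_uring_buf_ring_advance(br, 1);	/* publish the new tail to the kernel */
}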