io_uring: refactor io_queue_sqe()

io_queue_sqe() is a part of the submission path and we try hard to keep
it inlined, so shed some extra bytes from it by moving the error
checking part into io_queue_sqe_arm_apoll() and renaming it accordingly.

note: io_queue_sqe_arm_apoll() is not inlined, thus the patch doesn't
change the number of function calls for the apoll path.
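
For context, here is a simplified, self-contained sketch of the control flow this patch produces: the error checking lives in the cold io_queue_async() helper, and io_queue_sqe() keeps a single branch on the hot path. It is illustrative only; struct io_kiocb, io_issue_sqe(), io_arm_ltimeout() and the io_arm_poll_or_punt() helper below are stand-in stubs (io_arm_poll_or_punt() is not a real kernel function), and locking annotations, linked timeouts and the poll-arming switch are elided.

/*
 * Simplified sketch of the post-patch flow. Names mirror the kernel
 * functions, but the bodies are stubs reduced to what is needed to show
 * how io_queue_sqe() now dispatches to io_queue_async().
 */
#include <errno.h>
#include <stdio.h>

#define REQ_F_NOWAIT	(1U << 0)

struct io_kiocb {
	unsigned int flags;
};

static void io_req_complete_failed(struct io_kiocb *req, int ret)
{
	printf("complete request %p with error %d\n", (void *)req, ret);
}

static void io_arm_ltimeout(struct io_kiocb *req)
{
	printf("arm linked timeout for %p\n", (void *)req);
}

static void io_arm_poll_or_punt(struct io_kiocb *req)
{
	/* Stand-in for the poll-arming switch / async punt in the kernel. */
	printf("arm poll or punt %p to io-wq\n", (void *)req);
}

static int io_issue_sqe(struct io_kiocb *req)
{
	(void)req;
	/* Pretend the file can't make progress without blocking. */
	return -EAGAIN;
}

/* Cold path: everything that is not "issued successfully". */
static void io_queue_async(struct io_kiocb *req, int ret)
{
	/* Error checking moved here from io_queue_sqe(). */
	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
		io_req_complete_failed(req, ret);
		return;
	}
	io_arm_poll_or_punt(req);
}

/* Hot path stays small: one branch, one call on the slow side. */
static void io_queue_sqe(struct io_kiocb *req)
{
	int ret = io_issue_sqe(req);

	if (ret == 0)
		io_arm_ltimeout(req);
	else
		io_queue_async(req, ret);
}

int main(void)
{
	struct io_kiocb req = { .flags = 0 };

	io_queue_sqe(&req);	/* -EAGAIN without NOWAIT -> poll/punt path */
	return 0;
}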

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/9b79edd246336decfaca79b949a15ac69123490d.1650056133.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Pavel Begunkov, 2022-04-15 22:08:28 +01:00 (committed by Jens Axboe)
parent 77955efbc4
commit 7bfa9badc7

@@ -7508,10 +7508,17 @@ static void io_queue_linked_timeout(struct io_kiocb *req)
 	io_put_req(req);
 }
 
-static void io_queue_sqe_arm_apoll(struct io_kiocb *req)
+static void io_queue_async(struct io_kiocb *req, int ret)
 	__must_hold(&req->ctx->uring_lock)
 {
-	struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
+	struct io_kiocb *linked_timeout;
+
+	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
+		io_req_complete_failed(req, ret);
+		return;
+	}
+
+	linked_timeout = io_prep_linked_timeout(req);
 
 	switch (io_arm_poll_handler(req, 0)) {
 	case IO_APOLL_READY:
@@ -7547,13 +7554,10 @@ static inline void io_queue_sqe(struct io_kiocb *req)
 	 * We async punt it if the file wasn't marked NOWAIT, or if the file
 	 * doesn't support non-blocking read/write attempts
 	 */
-	if (likely(!ret)) {
+	if (likely(!ret))
 		io_arm_ltimeout(req);
-	} else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
-		io_queue_sqe_arm_apoll(req);
-	} else {
-		io_req_complete_failed(req, ret);
-	}
+	else
+		io_queue_async(req, ret);
 }
 
 static void io_queue_sqe_fallback(struct io_kiocb *req)