io_uring: inline io_read()'s iovec freeing

io_read() doesn't have the simplest control flow, with a lot of jumps, and
it's hard to read. One of those jumps is the out_free: label, which frees
iovec. However, from the middle of io_read() onwards iovec is NULL'ed, so
kfree(iovec) there is a no-op; that leaves us with two places where we can
inline the freeing and further clean up the code.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Pavel Begunkov 2021-02-04 13:52:03 +00:00 committed by Jens Axboe
parent 7335e3bf9d
commit 5ea5dd4584

@@ -3530,14 +3530,18 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 	}
 
 	ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
-	if (unlikely(ret))
-		goto out_free;
+	if (unlikely(ret)) {
+		kfree(iovec);
+		return ret;
+	}
 
 	ret = io_iter_do_read(req, iter);
 
 	if (ret == -EIOCBQUEUED) {
-		ret = 0;
-		goto out_free;
+		/* it's faster to check here then delegate to kfree */
+		if (iovec)
+			kfree(iovec);
+		return 0;
 	} else if (ret == -EAGAIN) {
 		/* IOPOLL retry should happen for io-wq threads */
 		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
@@ -3560,8 +3564,6 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 		return ret2;
 
 	rw = req->async_data;
-	/* it's copied and will be cleaned with ->io */
-	iovec = NULL;
 	/* now use our persistent iterator, if we aren't already */
 	iter = &rw->iter;
 retry:
@@ -3580,21 +3582,14 @@ retry:
 	 * do, then just retry at the new offset.
 	 */
 	ret = io_iter_do_read(req, iter);
-	if (ret == -EIOCBQUEUED) {
-		ret = 0;
-		goto out_free;
-	} else if (ret > 0 && ret < io_size) {
-		/* we got some bytes, but not all. retry. */
+	if (ret == -EIOCBQUEUED)
+		return 0;
+	/* we got some bytes, but not all. retry. */
+	if (ret > 0 && ret < io_size)
 		goto retry;
-	}
 done:
 	kiocb_done(kiocb, ret, cs);
-	ret = 0;
-out_free:
-	/* it's reportedly faster than delegating the null check to kfree() */
-	if (iovec)
-		kfree(iovec);
-	return ret;
+	return 0;
 }
 
 static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
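
For illustration only, below is a minimal userspace C sketch of the pattern the
commit message describes. It is an analogue, not kernel code: the names
process_line() and hand_off() are made up. The idea is the same, though: a
shared cleanup label that only does real work on the early-exit paths can be
replaced by freeing inline at those exits and dropping the label, since past
the hand-off point the function no longer owns the buffer and, like kfree(),
free() accepts NULL anyway.

/*
 * Hypothetical userspace analogue of the io_read() cleanup change:
 * free inline at the two early exits instead of jumping to a shared
 * out_free label, and drop the label because later code hands the
 * buffer off and no longer owns it.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Pretend consumer that takes ownership of buf, loosely analogous to
 * io_setup_async_rw() copying the iovec into the request's async data. */
static void hand_off(char *buf)
{
	printf("handed off: %s\n", buf);
	free(buf);
}

static int process_line(const char *src)
{
	char *buf = strdup(src);	/* analogue of the allocated iovec */

	if (!buf)
		return -1;

	if (buf[0] == '\0') {		/* first early exit: free inlined */
		free(buf);
		return -1;
	}

	if (buf[0] == '#') {		/* second early exit: free inlined,
					 * return directly, no goto needed */
		free(buf);
		return 0;
	}

	/* From here on ownership moves to hand_off(), so the tail of the
	 * function needs neither an out_free label nor a buf = NULL. */
	hand_off(buf);
	return 0;
}

int main(void)
{
	process_line("data");
	process_line("#comment");
	process_line("");
	return 0;
}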