block/null_blk: add queue_rqs() support

Add batched mq_ops.queue_rqs() support in null_blk for testing. The
implementation is much simpler since null_blk doesn't have commit_rqs(),
so there is no separate doorbell/commit step to batch up: each request
can just go through the existing null_queue_rq() path.
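
For context, a trimmed sketch of the mq_ops hooks being contrasted, as
declared in include/linux/blk-mq.h around this kernel version (other
members elided):

	struct blk_mq_ops {
		/* queue a single request to the device */
		blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
					 const struct blk_mq_queue_data *);
		/* post-batch doorbell/commit step; null_blk has none */
		void (*commit_rqs)(struct blk_mq_hw_ctx *);
		/* queue a whole list of requests in one call */
		void (*queue_rqs)(struct request **rqlist);
		/* ... */
	};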

We simply handle each request one by one: if an error is encountered,
the failed request is put back on the passed-in list, which is returned
to the caller for requeue.
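
The requeue_list/requeue_lastp pair in the hunk below is the usual
tail-pointer idiom for these singly linked request lists. For reference,
the rq_list helpers involved are macros over rq->rq_next; roughly (from
the include/linux/blk-mq.h of this era):

	/* detach and return the head of the list, or NULL if empty */
	#define rq_list_pop(listptr)				\
	({							\
		struct request *__req = NULL;			\
		if ((listptr) && *(listptr)) {			\
			__req = *(listptr);			\
			*(listptr) = __req->rq_next;		\
		}						\
		__req;						\
	})

	/* lastp points at the tail's rq_next slot, keeping appends O(1) */
	#define rq_list_add_tail(lastp, rq)	do {		\
		(rq)->rq_next = NULL;				\
		**(lastp) = rq;					\
		*(lastp) = &(rq)->rq_next;			\
	} while (0)

	#define rq_list_empty(list)	((list) == (struct request *) NULL)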

There is about a 3.6% improvement in IOPS of fio/t/io_uring on null_blk
with hw_queue_depth=256 on my test VM, from 1.09M to 1.13M.

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20230913151616.3164338-6-chengming.zhou@linux.dev
Signed-off-by: Jens Axboe <axboe@kernel.dk>

@@ -1750,6 +1750,25 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
 }
 
+static void null_queue_rqs(struct request **rqlist)
+{
+	struct request *requeue_list = NULL;
+	struct request **requeue_lastp = &requeue_list;
+	struct blk_mq_queue_data bd = { };
+	blk_status_t ret;
+
+	do {
+		struct request *rq = rq_list_pop(rqlist);
+
+		bd.rq = rq;
+		ret = null_queue_rq(rq->mq_hctx, &bd);
+		if (ret != BLK_STS_OK)
+			rq_list_add_tail(&requeue_lastp, rq);
+	} while (!rq_list_empty(*rqlist));
+
+	*rqlist = requeue_list;
+}
+
 static void cleanup_queue(struct nullb_queue *nq)
 {
 	bitmap_free(nq->tag_map);
@@ -1802,6 +1821,7 @@ static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
 
 static const struct blk_mq_ops null_mq_ops = {
 	.queue_rq	= null_queue_rq,
+	.queue_rqs	= null_queue_rqs,
 	.complete	= null_complete_rq,
 	.timeout	= null_timeout_rq,
 	.poll		= null_poll,