block: enable batched allocation for blk_mq_alloc_request()
The filesystem IO path can take advantage of allocating batches of
requests, if the underlying submitter tells the block layer about it
through the blk_plug. For passthrough IO, the exported API is the
blk_mq_alloc_request() helper, and that one does not allow for request
caching.

Wire up request caching for blk_mq_alloc_request(), which is generally
done without having a bio available upfront.

Tested-by: Anuj Gupta <anuj20.g@samsung.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e73a625bc2
commit 4b6a5d9cea
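Not part of the commit, but for orientation: a minimal caller-side sketch of what this enables. submit_passthrough_batch() is a hypothetical helper invented for the illustration; the sketch only assumes the existing blk_start_plug_nr_ios()/blk_finish_plug() plug interface to announce the expected batch size, so the first blk_mq_alloc_request() call can fill the plug's request cache and later calls can be served from it. Whether a given allocation actually hits the cache depends on the checks in blk_mq_alloc_cached_request() in the diff below.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/err.h>

/*
 * Hypothetical passthrough submitter (illustration only, not from this
 * commit): announce the expected batch size via the plug so that
 * blk_mq_alloc_request() can hand out requests from the plug cache.
 */
static int submit_passthrough_batch(struct request_queue *q, unsigned short nr)
{
        struct blk_plug plug;
        unsigned short i;
        int ret = 0;

        blk_start_plug_nr_ios(&plug, nr);

        for (i = 0; i < nr; i++) {
                struct request *rq;

                /* First allocation can fill the cache, later ones pop from it. */
                rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
                if (IS_ERR(rq)) {
                        ret = PTR_ERR(rq);
                        break;
                }

                /* ... set up the passthrough command and dispatch it here ... */
                blk_mq_free_request(rq);
        }

        blk_finish_plug(&plug);
        return ret;
}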
@@ -510,25 +510,87 @@ retry:
                                 alloc_time_ns);
 }
 
-struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
-               blk_mq_req_flags_t flags)
+static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
+                                            struct blk_plug *plug,
+                                            blk_opf_t opf,
+                                            blk_mq_req_flags_t flags)
 {
         struct blk_mq_alloc_data data = {
                 .q = q,
                 .flags = flags,
                 .cmd_flags = opf,
-                .nr_tags = 1,
+                .nr_tags = plug->nr_ios,
+                .cached_rq = &plug->cached_rq,
         };
         struct request *rq;
-        int ret;
 
-        ret = blk_queue_enter(q, flags);
-        if (ret)
-                return ERR_PTR(ret);
+        if (blk_queue_enter(q, flags))
+                return NULL;
+
+        plug->nr_ios = 1;
 
         rq = __blk_mq_alloc_requests(&data);
-        if (!rq)
-                goto out_queue_exit;
+        if (unlikely(!rq))
+                blk_queue_exit(q);
+        return rq;
+}
+
+static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
+                                                   blk_opf_t opf,
+                                                   blk_mq_req_flags_t flags)
+{
+        struct blk_plug *plug = current->plug;
+        struct request *rq;
+
+        if (!plug)
+                return NULL;
+        if (rq_list_empty(plug->cached_rq)) {
+                if (plug->nr_ios == 1)
+                        return NULL;
+                rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
+                if (rq)
+                        goto got_it;
+                return NULL;
+        }
+        rq = rq_list_peek(&plug->cached_rq);
+        if (!rq || rq->q != q)
+                return NULL;
+
+        if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
+                return NULL;
+        if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
+                return NULL;
+
+        plug->cached_rq = rq_list_next(rq);
+got_it:
+        rq->cmd_flags = opf;
+        INIT_LIST_HEAD(&rq->queuelist);
+        return rq;
+}
+
+struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
+               blk_mq_req_flags_t flags)
+{
+        struct request *rq;
+
+        rq = blk_mq_alloc_cached_request(q, opf, flags);
+        if (!rq) {
+                struct blk_mq_alloc_data data = {
+                        .q = q,
+                        .flags = flags,
+                        .cmd_flags = opf,
+                        .nr_tags = 1,
+                };
+                int ret;
+
+                ret = blk_queue_enter(q, flags);
+                if (ret)
+                        return ERR_PTR(ret);
+
+                rq = __blk_mq_alloc_requests(&data);
+                if (!rq)
+                        goto out_queue_exit;
+        }
         rq->__data_len = 0;
         rq->__sector = (sector_t) -1;
         rq->bio = rq->biotail = NULL;