blk-mq: cleanup request allocation
Refactor the request allocation so that blk_mq_get_cached_request tries
to find a cached request first, and the entirely separate and now
self-contained blk_mq_get_new_requests allocates one or more requests
if that is not possible.

There is a small change in behavior, as submit_bio_checks is now called
twice if a cached request is present but cannot be used, but that is a
small price to pay for unwinding this code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211124062856.1444266-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 5b13bc8a3f
parent 82d981d423
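For orientation before reading the diff: after this patch, blk_mq_submit_bio
first tries the plug's cached requests and only enters the queue to allocate
when that fails. Below is a condensed sketch of that flow, paraphrased from
the diff; the wrapper name example_submit_flow is made up for illustration,
as the real logic sits inline in blk_mq_submit_bio().

/*
 * Condensed sketch of the post-patch allocation flow, paraphrased from
 * the diff below.  The wrapper name example_submit_flow is hypothetical.
 */
static struct request *example_submit_flow(struct request_queue *q,
		struct blk_plug *plug, struct bio *bio, unsigned int nr_segs)
{
	struct request *rq;

	/* Fast path: reuse a request cached on the current plug, if any. */
	rq = blk_mq_get_cached_request(q, plug, bio, nr_segs);
	if (!rq) {
		/*
		 * Slow path: enter the queue and allocate one (or, with a
		 * plug, several) new requests.  blk_mq_get_new_requests()
		 * repeats submit_bio_checks() if a cached request was
		 * present but unusable; this is the small behavior change
		 * noted above.
		 */
		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
		if (unlikely(!rq))
			return NULL;	/* merged, failed checks, or no tag */
	}
	return rq;
}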
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2717,8 +2717,12 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	};
 	struct request *rq;
 
-	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+	if (unlikely(bio_queue_enter(bio)))
 		return NULL;
+	if (unlikely(!submit_bio_checks(bio)))
+		goto queue_exit;
+	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+		goto queue_exit;
 
 	rq_qos_throttle(q, bio);
 
@@ -2729,65 +2733,45 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	}
 
 	rq = __blk_mq_alloc_requests(&data);
-	if (rq)
-		return rq;
+	if (!rq)
+		goto fail;
+	return rq;
+
+fail:
 	rq_qos_cleanup(q, bio);
 	if (bio->bi_opf & REQ_NOWAIT)
 		bio_wouldblock_error(bio);
+queue_exit:
+	blk_queue_exit(q);
 	return NULL;
 }
 
-static inline bool blk_mq_can_use_cached_rq(struct request *rq, struct bio *bio)
-{
-	if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
-		return false;
-
-	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
-		return false;
-
-	return true;
-}
-
-static inline struct request *blk_mq_get_request(struct request_queue *q,
-						 struct blk_plug *plug,
-						 struct bio *bio,
-						 unsigned int nsegs)
+static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
+		struct blk_plug *plug, struct bio *bio, unsigned int nsegs)
 {
 	struct request *rq;
-	bool checked = false;
 
-	if (plug) {
-		rq = rq_list_peek(&plug->cached_rq);
-		if (rq && rq->q == q) {
-			if (unlikely(!submit_bio_checks(bio)))
-				return NULL;
-			if (blk_mq_attempt_bio_merge(q, bio, nsegs))
-				return NULL;
-			checked = true;
-			if (!blk_mq_can_use_cached_rq(rq, bio))
-				goto fallback;
-			rq->cmd_flags = bio->bi_opf;
-			plug->cached_rq = rq_list_next(rq);
-			INIT_LIST_HEAD(&rq->queuelist);
-			rq_qos_throttle(q, bio);
-			return rq;
-		}
-	}
+	if (!plug)
+		return NULL;
+	rq = rq_list_peek(&plug->cached_rq);
+	if (!rq || rq->q != q)
+		return NULL;
 
-fallback:
-	if (unlikely(bio_queue_enter(bio)))
-		return NULL;
-	if (unlikely(!checked && !submit_bio_checks(bio)))
-		goto out_put;
-	rq = blk_mq_get_new_requests(q, plug, bio, nsegs);
-	if (rq)
-		return rq;
-out_put:
-	blk_queue_exit(q);
-	return NULL;
+	if (unlikely(!submit_bio_checks(bio)))
+		return NULL;
+	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+		return NULL;
+	if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
+		return NULL;
+	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+		return NULL;
+
+	rq->cmd_flags = bio->bi_opf;
+	plug->cached_rq = rq_list_next(rq);
+	INIT_LIST_HEAD(&rq->queuelist);
+	rq_qos_throttle(q, bio);
+	return rq;
 }
 
 /**
  * blk_mq_submit_bio - Create and send a request to block device.
@@ -2805,9 +2789,9 @@ out_put:
 void blk_mq_submit_bio(struct bio *bio)
 {
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+	struct blk_plug *plug = blk_mq_plug(q, bio);
 	const int is_sync = op_is_sync(bio->bi_opf);
 	struct request *rq;
-	struct blk_plug *plug;
 	unsigned int nr_segs = 1;
 	blk_status_t ret;
 
@@ -2821,10 +2805,12 @@ void blk_mq_submit_bio(struct bio *bio)
 	if (!bio_integrity_prep(bio))
 		return;
 
-	plug = blk_mq_plug(q, bio);
-	rq = blk_mq_get_request(q, plug, bio, nr_segs);
-	if (unlikely(!rq))
-		return;
+	rq = blk_mq_get_cached_request(q, plug, bio, nr_segs);
+	if (!rq) {
+		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+		if (unlikely(!rq))
+			return;
+	}
 
 	trace_block_getrq(bio);
 
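A note on the cached-request eligibility test: the old blk_mq_can_use_cached_rq()
helper is gone, but its two conditions survive as open-coded checks in
blk_mq_get_cached_request(). Restated as a standalone predicate for clarity
(the helper name cached_rq_usable is made up for illustration, not part of
the patch):

/*
 * The two eligibility conditions from the diff above, restated as one
 * predicate: a plugged request can back this bio only if it was
 * allocated for the same hctx type and matches the bio's flush
 * disposition.  The name cached_rq_usable is hypothetical.
 */
static inline bool cached_rq_usable(struct request *rq, struct bio *bio)
{
	return blk_mq_get_hctx_type(bio->bi_opf) == rq->mq_hctx->type &&
	       op_is_flush(rq->cmd_flags) == op_is_flush(bio->bi_opf);
}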