blk-mq: simplify the blk_mq_get_request calling convention
The bio argument is entirely unused, and the request_queue can be passed through the alloc_data, given that it needs to be filled out for the low-level tag allocation anyway. Also rename the function to __blk_mq_alloc_request, as the switch between "get" and "alloc" in the call chains is rather confusing.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit e6e7abffe3
parent 5d9c305b8e
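To illustrate the new convention before reading the diff: callers no longer pass the queue (or a never-used bio) as separate arguments, they fill out a blk_mq_alloc_data and hand only that to __blk_mq_alloc_request. The sketch below is a minimal model of a call site inside block/blk-mq.c (the function is static, so it is only reachable there); example_alloc is a hypothetical helper, not part of the patch.

/*
 * Minimal sketch of the before/after calling convention, assuming a
 * call site inside block/blk-mq.c. example_alloc is hypothetical.
 */
static struct request *example_alloc(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data data = {
		.q		= q,	/* the queue now travels inside the data struct */
		.flags		= flags,
		.cmd_flags	= op,
	};

	/* old: rq = blk_mq_get_request(q, NULL, &data); -- the bio argument was always unused */
	return __blk_mq_alloc_request(&data);
}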
@@ -332,10 +332,9 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	return rq;
 }
 
-static struct request *blk_mq_get_request(struct request_queue *q,
-					  struct bio *bio,
-					  struct blk_mq_alloc_data *data)
+static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
 {
+	struct request_queue *q = data->q;
 	struct elevator_queue *e = q->elevator;
 	struct request *rq;
 	unsigned int tag;
@@ -346,7 +345,6 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 	if (blk_queue_rq_alloc_time(q))
 		alloc_time_ns = ktime_get_ns();
 
-	data->q = q;
 	if (likely(!data->ctx)) {
 		data->ctx = blk_mq_get_ctx(q);
 		clear_ctx_on_error = true;
@@ -398,7 +396,11 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 		blk_mq_req_flags_t flags)
 {
-	struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
+	struct blk_mq_alloc_data data = {
+		.q		= q,
+		.flags		= flags,
+		.cmd_flags	= op,
+	};
 	struct request *rq;
 	int ret;
 
@@ -406,7 +408,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 	if (ret)
 		return ERR_PTR(ret);
 
-	rq = blk_mq_get_request(q, NULL, &alloc_data);
+	rq = __blk_mq_alloc_request(&data);
 	if (!rq)
 		goto out_queue_exit;
 	rq->__data_len = 0;
@@ -422,7 +424,11 @@ EXPORT_SYMBOL(blk_mq_alloc_request);
 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
 {
-	struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
+	struct blk_mq_alloc_data data = {
+		.q		= q,
+		.flags		= flags,
+		.cmd_flags	= op,
+	};
 	struct request *rq;
 	unsigned int cpu;
 	int ret;
@@ -448,14 +454,14 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	 * If not tell the caller that it should skip this queue.
 	 */
 	ret = -EXDEV;
-	alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
-	if (!blk_mq_hw_queue_mapped(alloc_data.hctx))
+	data.hctx = q->queue_hw_ctx[hctx_idx];
+	if (!blk_mq_hw_queue_mapped(data.hctx))
 		goto out_queue_exit;
-	cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
-	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
+	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
+	data.ctx = __blk_mq_get_ctx(q, cpu);
 
 	ret = -EWOULDBLOCK;
-	rq = blk_mq_get_request(q, NULL, &alloc_data);
+	rq = __blk_mq_alloc_request(&data);
 	if (!rq)
 		goto out_queue_exit;
 	return rq;
@@ -2027,7 +2033,9 @@ blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = op_is_sync(bio->bi_opf);
 	const int is_flush_fua = op_is_flush(bio->bi_opf);
-	struct blk_mq_alloc_data data = { .flags = 0};
+	struct blk_mq_alloc_data data = {
+		.q		= q,
+	};
 	struct request *rq;
 	struct blk_plug *plug;
 	struct request *same_queue_rq = NULL;
@@ -2051,7 +2059,7 @@ blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	rq_qos_throttle(q, bio);
 
 	data.cmd_flags = bio->bi_opf;
-	rq = blk_mq_get_request(q, bio, &data);
+	rq = __blk_mq_alloc_request(&data);
 	if (unlikely(!rq)) {
 		rq_qos_cleanup(q, bio);
 		if (bio->bi_opf & REQ_NOWAIT)
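For reference, these are the blk_mq_alloc_data fields the patch touches, roughly as block/blk-mq.h defined them around this kernel version (reproduced from memory, so field order and the shallow_depth member should be treated as approximate):

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

Because .q was already a member that the low-level tag allocation needed filled in, initializing it at the declaration site lets both the q parameter and the dead bio parameter drop out of the function signature entirely.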