blk-mq: remove non-blocking pass in blk_mq_map_request
bt_get already does a non-blocking pass as well as running the queue when scheduling internally, so there is no need to duplicate it here.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit 63581af3f3
parent 841bac2c87
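For context on the claim in the commit message: at this point in history the blk-mq tag slow path (bt_get() in block/blk-mq-tag.c) already tries a non-blocking tag grab first, bails out for BLK_MQ_REQ_NOWAIT callers, and otherwise kicks the hardware queue before sleeping. The outline below is a condensed sketch of that path from memory, not a verbatim copy; helper names such as __bt_get() and bt_wait_ptr() and the exact parameter list are approximations.

	/* Condensed sketch of the tag slow path the commit message refers to. */
	static int bt_get(struct blk_mq_alloc_data *data, struct blk_mq_bitmap_tags *bt,
			  struct blk_mq_hw_ctx *hctx, unsigned int *last_tag,
			  struct blk_mq_tags *tags)
	{
		struct bt_wait_state *bs;
		DEFINE_WAIT(wait);
		int tag;

		/* Non-blocking pass: the same attempt BLK_MQ_REQ_NOWAIT used to give the caller. */
		tag = __bt_get(hctx, bt, last_tag, tags);
		if (tag != -1)
			return tag;

		/* Callers that must not sleep give up here. */
		if (data->flags & BLK_MQ_REQ_NOWAIT)
			return -1;

		/* Blocking pass: run the queue so completions free up tags, then sleep and retry. */
		bs = bt_wait_ptr(bt, hctx);
		do {
			prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

			tag = __bt_get(hctx, bt, last_tag, tags);
			if (tag != -1)
				break;

			blk_mq_run_hw_queue(hctx, false);
			io_schedule();
			/* (the real code also re-maps ctx/hctx after sleeping; omitted here) */
		} while (1);

		finish_wait(&bs->wait, &wait);
		return tag;
	}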
@@ -1210,20 +1210,8 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 		op_flags |= REQ_SYNC;
 
 	trace_block_getrq(q, bio, op);
-	blk_mq_set_alloc_data(&alloc_data, q, BLK_MQ_REQ_NOWAIT, ctx, hctx);
+	blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
 	rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
-	if (unlikely(!rq)) {
-		blk_mq_run_hw_queue(hctx, false);
-		blk_mq_put_ctx(ctx);
-		trace_block_sleeprq(q, bio, op);
-
-		ctx = blk_mq_get_ctx(q);
-		hctx = q->mq_ops->map_queue(q, ctx->cpu);
-		blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
-		rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
-		ctx = alloc_data.ctx;
-		hctx = alloc_data.hctx;
-	}
 
 	hctx->queued++;
 	data->hctx = hctx;
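For readability, here is how the touched region of blk_mq_map_request() reads once the hunk is applied; only the lines visible in the hunk are shown, with comments added, and the rest of the function is assumed unchanged.

			op_flags |= REQ_SYNC;

		trace_block_getrq(q, bio, op);
		/* Flags = 0 (no BLK_MQ_REQ_NOWAIT): the tag code itself now does the
		 * non-blocking first attempt, and runs the queue and sleeps if needed. */
		blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
		rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);

		hctx->queued++;
		data->hctx = hctx;

The effect is that the retry-with-sleep fallback disappears from the caller and the blocking behaviour lives entirely in the tag allocation path.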