Mirror of https://github.com/torvalds/linux.git
blk-mq: don't overwrite rq->mq_ctx
We do this in a few places if the CPU is offline. This isn't allowed, though, since on multi queue hardware we can't just move a request from one software queue to another if they map to different hardware queues. The request and tag aren't valid on another hardware queue.

This can happen if plugging races with CPU offlining. But it does no harm, since it can only happen in the window where we are currently busy freezing the queue and flushing IO, in preparation for redoing the software <-> hardware queue mappings.

Signed-off-by: Jens Axboe <axboe@fb.com>
This commit is contained in:
parent
4d70dca4ea
commit
e57690fe00
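To make the constraint concrete, here is a minimal sketch in kernel-style C of the invariant the patch relies on. The helper rq_still_valid_on() is hypothetical, not part of the patch, and assumes kernel context; it uses only identifiers that appear in the diff below (rq->mq_ctx, ctx->cpu, q->mq_ops->map_queue):

/*
 * Hypothetical helper, for illustration only: a request's tag was
 * allocated from the tag map of the hardware queue that its software
 * queue (rq->mq_ctx) maps to. Rewriting rq->mq_ctx to a software
 * queue that maps to a different hardware queue would leave rq->tag
 * pointing into the wrong tag map.
 */
static bool rq_still_valid_on(struct request *rq, struct blk_mq_ctx *new_ctx)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *old_hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
	struct blk_mq_hw_ctx *new_hctx = q->mq_ops->map_queue(q, new_ctx->cpu);

	/* the tag only means something on the hardware queue it came from */
	return old_hctx == new_hctx;
}

This is also why the last hunk below splices a dead CPU's rq_list straight onto hctx->dispatch instead of moving the requests to another software queue: the hardware queue, and hence the tag, stays the same.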
block/blk-mq.c

@@ -1036,10 +1036,11 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 EXPORT_SYMBOL(blk_mq_delay_queue);
 
 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
-					    struct blk_mq_ctx *ctx,
 					    struct request *rq,
 					    bool at_head)
 {
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
+
 	trace_block_rq_insert(hctx->queue, rq);
 
 	if (at_head)
@@ -1053,20 +1054,16 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 
-	__blk_mq_insert_req_list(hctx, ctx, rq, at_head);
+	__blk_mq_insert_req_list(hctx, rq, at_head);
 	blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 		bool async)
 {
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
 	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
-
-	current_ctx = blk_mq_get_ctx(q);
-	if (!cpu_online(ctx->cpu))
-		rq->mq_ctx = ctx = current_ctx;
 
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
@@ -1076,8 +1073,6 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 
 	if (run_queue)
 		blk_mq_run_hw_queue(hctx, async);
-
-	blk_mq_put_ctx(current_ctx);
 }
 
 static void blk_mq_insert_requests(struct request_queue *q,
@@ -1088,14 +1083,9 @@ static void blk_mq_insert_requests(struct request_queue *q,
 
 {
 	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *current_ctx;
 
 	trace_block_unplug(q, depth, !from_schedule);
 
-	current_ctx = blk_mq_get_ctx(q);
-
-	if (!cpu_online(ctx->cpu))
-		ctx = current_ctx;
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
 	/*
@@ -1107,15 +1097,14 @@ static void blk_mq_insert_requests(struct request_queue *q,
 		struct request *rq;
 
 		rq = list_first_entry(list, struct request, queuelist);
+		BUG_ON(rq->mq_ctx != ctx);
 		list_del_init(&rq->queuelist);
-		rq->mq_ctx = ctx;
-		__blk_mq_insert_req_list(hctx, ctx, rq, false);
+		__blk_mq_insert_req_list(hctx, rq, false);
 	}
 	blk_mq_hctx_mark_pending(hctx, ctx);
 	spin_unlock(&ctx->lock);
 
 	blk_mq_run_hw_queue(hctx, from_schedule);
-	blk_mq_put_ctx(current_ctx);
 }
 
 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
@@ -1630,16 +1619,17 @@ static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
 	return 0;
 }
 
+/*
+ * 'cpu' is going away. splice any existing rq_list entries from this
+ * software queue to the hw queue dispatch list, and ensure that it
+ * gets run.
+ */
 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
 {
-	struct request_queue *q = hctx->queue;
 	struct blk_mq_ctx *ctx;
 	LIST_HEAD(tmp);
 
-	/*
-	 * Move ctx entries to new CPU, if this one is going away.
-	 */
-	ctx = __blk_mq_get_ctx(q, cpu);
+	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
 
 	spin_lock(&ctx->lock);
 	if (!list_empty(&ctx->rq_list)) {
@@ -1651,24 +1641,11 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
 	if (list_empty(&tmp))
 		return NOTIFY_OK;
 
-	ctx = blk_mq_get_ctx(q);
-	spin_lock(&ctx->lock);
-
-	while (!list_empty(&tmp)) {
-		struct request *rq;
-
-		rq = list_first_entry(&tmp, struct request, queuelist);
-		rq->mq_ctx = ctx;
-		list_move_tail(&rq->queuelist, &ctx->rq_list);
-	}
-
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
-	blk_mq_hctx_mark_pending(hctx, ctx);
-
-	spin_unlock(&ctx->lock);
+	spin_lock(&hctx->lock);
+	list_splice_tail_init(&tmp, &hctx->dispatch);
+	spin_unlock(&hctx->lock);
 
 	blk_mq_run_hw_queue(hctx, true);
-	blk_mq_put_ctx(ctx);
 	return NOTIFY_OK;
 }
 