block: rename blk_mq_freeze_queue_start()
As the .q_usage_counter is used by both the legacy and the mq path, we need to block new I/O in blk_queue_enter() if the queue becomes dead. So rename the function so that it can be used in both paths.

Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 5ed61d3f08
commit 1671d522cd
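For context, the renamed function is the opening step of the queue-freeze protocol. Below is a minimal sketch of the full cycle, using only the helpers declared in include/linux/blk-mq.h later in this diff (simplified; the function name queue_update_sketch is invented for illustration):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Sketch: quiesce a queue for a state change, then resume it. */
static void queue_update_sketch(struct request_queue *q)
{
        blk_freeze_queue_start(q);      /* kill q_usage_counter; new I/O blocks in blk_queue_enter() */
        blk_mq_freeze_queue_wait(q);    /* wait for in-flight I/O to drain to zero */

        /* ... queue is idle here: safe to update queue state ... */

        blk_mq_unfreeze_queue(q);       /* revive the counter and wake blocked submitters */
}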
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -670,7 +670,7 @@ int blk_queue_enter(struct request_queue *q, bool nowait)
 			return -EBUSY;
 
 		/*
-		 * read pair of barrier in blk_mq_freeze_queue_start(),
+		 * read pair of barrier in blk_freeze_queue_start(),
 		 * we need to order reading __PERCPU_REF_DEAD flag of
 		 * .q_usage_counter and reading .mq_freeze_depth,
 		 * otherwise the following wait may never return if the
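The comment relies on classic write/read barrier pairing. Here is a generic sketch of the discipline it describes, with invented variable names standing in for .mq_freeze_depth and the __PERCPU_REF_DEAD flag (in the real code the write-side barrier is implied by percpu_ref_kill()):

#include <linux/atomic.h>

static atomic_t freeze_depth;   /* stands in for q->mq_freeze_depth */
static atomic_t ref_dead;       /* stands in for __PERCPU_REF_DEAD */

/* Writer (blk_freeze_queue_start): publish the depth before the dead flag. */
static void writer(void)
{
        atomic_inc(&freeze_depth);      /* store A */
        smp_wmb();                      /* order store A before store B */
        atomic_set(&ref_dead, 1);       /* store B */
}

/* Reader (blk_queue_enter): check the flag first, then trust the depth. */
static int reader(void)
{
        if (!atomic_read(&ref_dead))    /* load B */
                return 0;               /* counter still live: fast path */
        smp_rmb();                      /* pairs with the writer's smp_wmb() */
        return atomic_read(&freeze_depth);  /* load A: sees store A if load B saw store B */
}

Without the smp_rmb(), load A could be satisfied before load B, so blk_queue_enter() could act on a stale freeze depth and the subsequent wait could miss its wakeup, which is exactly the hazard the comment warns about.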
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -68,7 +68,7 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
 	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
 }
 
-void blk_mq_freeze_queue_start(struct request_queue *q)
+void blk_freeze_queue_start(struct request_queue *q)
 {
 	int freeze_depth;
 
@@ -78,7 +78,7 @@ void blk_mq_freeze_queue_start(struct request_queue *q)
 		blk_mq_run_hw_queues(q, false);
 	}
 }
-EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
+EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
 
 void blk_mq_freeze_queue_wait(struct request_queue *q)
 {
@@ -108,7 +108,7 @@ void blk_freeze_queue(struct request_queue *q)
 	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
 	 * exported to drivers as the only user for unfreeze is blk_mq.
 	 */
-	blk_mq_freeze_queue_start(q);
+	blk_freeze_queue_start(q);
 	blk_mq_freeze_queue_wait(q);
 }
 
@@ -746,7 +746,7 @@ static void blk_mq_timeout_work(struct work_struct *work)
 	 * percpu_ref_tryget directly, because we need to be able to
 	 * obtain a reference even in the short window between the queue
 	 * starting to freeze, by dropping the first reference in
-	 * blk_mq_freeze_queue_start, and the moment the last request is
+	 * blk_freeze_queue_start, and the moment the last request is
 	 * consumed, marked by the instant q_usage_counter reaches
 	 * zero.
 	 */
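The distinction this comment turns on: percpu_ref_tryget() still succeeds after percpu_ref_kill() as long as the count is above zero, while percpu_ref_tryget_live() fails as soon as the DEAD flag is set. A simplified sketch of the pattern in blk_mq_timeout_work(), with the request scan elided:

#include <linux/blkdev.h>
#include <linux/percpu-refcount.h>

static void timeout_work_sketch(struct request_queue *q)
{
        /*
         * tryget, not tryget_live: a freeze may already have started, but
         * while requests remain in flight the counter is nonzero and the
         * timeout handler must still be able to reach those requests.
         */
        if (!percpu_ref_tryget(&q->q_usage_counter))
                return; /* counter reached zero: queue fully drained */

        /* ... scan hardware queues for timed-out requests ... */

        blk_queue_exit(q);      /* drop the reference taken above */
}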
@@ -2376,7 +2376,7 @@ static void blk_mq_queue_reinit_work(void)
 	 * take place in parallel.
 	 */
 	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_mq_freeze_queue_start(q);
+		blk_freeze_queue_start(q);
 	list_for_each_entry(q, &all_q_list, all_q_node)
 		blk_mq_freeze_queue_wait(q);
 
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -4162,7 +4162,7 @@ static int mtip_block_remove(struct driver_data *dd)
 		dev_info(&dd->pdev->dev, "device %s surprise removal\n",
 			dd->disk->disk_name);
 
-	blk_mq_freeze_queue_start(dd->queue);
+	blk_freeze_queue_start(dd->queue);
 	blk_mq_stop_hw_queues(dd->queue);
 	blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd);
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2386,7 +2386,7 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl)
 
 	mutex_lock(&ctrl->namespaces_mutex);
 	list_for_each_entry(ns, &ctrl->namespaces, list)
-		blk_mq_freeze_queue_start(ns->queue);
+		blk_freeze_queue_start(ns->queue);
 	mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_start_freeze);
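nvme_start_freeze() only begins the drain on every namespace queue. A hedged sketch of how a controller reset path might pair it with companion helpers; nvme_wait_freeze() and nvme_unfreeze() are assumed from the same NVMe core API family and are not part of this diff:

static void reset_path_sketch(struct nvme_ctrl *ctrl)
{
        nvme_start_freeze(ctrl);        /* start draining every ns->queue */

        /* ... disable the controller, rebuild the I/O queues ... */

        nvme_wait_freeze(ctrl);         /* assumed helper: wait for all drains */
        nvme_unfreeze(ctrl);            /* assumed helper: resume I/O */
}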
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -243,7 +243,7 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 		busy_tag_iter_fn *fn, void *priv);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_unfreeze_queue(struct request_queue *q);
-void blk_mq_freeze_queue_start(struct request_queue *q);
+void blk_freeze_queue_start(struct request_queue *q);
 void blk_mq_freeze_queue_wait(struct request_queue *q);
 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
 				     unsigned long timeout);
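The timeout variant lets a caller bound the total drain time when freezing a batch of queues. A hypothetical sketch; the array, count, and labels are invented for illustration, and only the blk_* calls come from the declarations above:

#include <linux/blk-mq.h>
#include <linux/errno.h>

static int freeze_all_bounded(struct request_queue **qs, int nr,
                              unsigned long timeout)
{
        int i;

        /* Start every freeze first so the drains proceed in parallel. */
        for (i = 0; i < nr; i++)
                blk_freeze_queue_start(qs[i]);

        /* Spend one shared timeout budget waiting on each queue in turn. */
        for (i = 0; i < nr; i++) {
                timeout = blk_mq_freeze_queue_wait_timeout(qs[i], timeout);
                if (!timeout)
                        goto revert;    /* budget exhausted before draining */
        }
        return 0;

revert:
        /* Every queue had its freeze started, so unfreeze all of them. */
        for (i = 0; i < nr; i++)
                blk_mq_unfreeze_queue(qs[i]);
        return -ETIMEDOUT;
}

Starting all freezes before waiting on any is the same pattern blk_mq_queue_reinit_work() uses above: the per-queue drains overlap instead of running back-to-back.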