blk-mq: pass a tagset to blk_mq_wait_quiesce_done

Nothing in blk_mq_wait_quiesce_done needs the request_queue now, so just
pass the tagset, and move the non-mq check into the only caller that
needs it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Chao Leng <lengchao@huawei.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20221101150050.3510-13-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 483239c75b (parent 80bd4a7aab)
Author:    Christoph Hellwig <hch@lst.de>, 2022-11-01 16:00:48 +01:00
Committer: Jens Axboe <axboe@kernel.dk>
4 changed files with 13 additions and 11 deletions

block/blk-mq.c

@@ -254,15 +254,17 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
 
 /**
  * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
- * @q: request queue.
+ * @set: tag_set to wait on
  *
  * Note: it is driver's responsibility for making sure that quiesce has
- * been started.
+ * been started on one or more of the request_queues of the tag_set. This
+ * function only waits for the quiesce on those request_queues that had
+ * the quiesce flag set using blk_mq_quiesce_queue_nowait.
  */
-void blk_mq_wait_quiesce_done(struct request_queue *q)
+void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set)
 {
-	if (q->tag_set->flags & BLK_MQ_F_BLOCKING)
-		synchronize_srcu(q->tag_set->srcu);
+	if (set->flags & BLK_MQ_F_BLOCKING)
+		synchronize_srcu(set->srcu);
 	else
 		synchronize_rcu();
 }
@@ -282,7 +284,7 @@ void blk_mq_quiesce_queue(struct request_queue *q)
 	blk_mq_quiesce_queue_nowait(q);
 	/* nothing to wait for non-mq queues */
 	if (queue_is_mq(q))
-		blk_mq_wait_quiesce_done(q);
+		blk_mq_wait_quiesce_done(q->tag_set);
 }
 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
 
@@ -1623,7 +1625,7 @@ static void blk_mq_timeout_work(struct work_struct *work)
 		 * uses srcu or rcu, wait for a synchronization point to
 		 * ensure all running submits have finished
 		 */
-		blk_mq_wait_quiesce_done(q);
+		blk_mq_wait_quiesce_done(q->tag_set);
 
 		expired.next = 0;
 		blk_mq_queue_tag_busy_iter(q, blk_mq_handle_expired, &expired);
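
The point of the new signature is that one wait can cover a whole set of queues: flag each queue quiesced without waiting, then pay for a single RCU/SRCU grace period per tag_set. A minimal sketch of that pattern (example_quiesce_tagset is hypothetical; set->tag_list, q->tag_set_list and set->tag_list_lock are existing blk-mq fields, and a later patch in this series adds a tagset-wide quiesce helper along these lines):

/* Sketch: quiesce every request_queue sharing a tag_set, then wait once. */
static void example_quiesce_tagset(struct blk_mq_tag_set *set)
{
	struct request_queue *q;

	mutex_lock(&set->tag_list_lock);
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_quiesce_queue_nowait(q);	/* set the flag, don't wait */
	/* one synchronize_rcu()/synchronize_srcu() for the whole set */
	blk_mq_wait_quiesce_done(set);
	mutex_unlock(&set->tag_list_lock);
}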

drivers/nvme/host/core.c

@@ -5107,7 +5107,7 @@ static void nvme_stop_ns_queue(struct nvme_ns *ns)
 	if (!test_and_set_bit(NVME_NS_STOPPED, &ns->flags))
 		blk_mq_quiesce_queue(ns->queue);
 	else
-		blk_mq_wait_quiesce_done(ns->queue);
+		blk_mq_wait_quiesce_done(ns->queue->tag_set);
 }
 
 /* let I/O to all namespaces fail in preparation for surprise removal */
@@ -5197,7 +5197,7 @@ void nvme_stop_admin_queue(struct nvme_ctrl *ctrl)
 	if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
 		blk_mq_quiesce_queue(ctrl->admin_q);
 	else
-		blk_mq_wait_quiesce_done(ctrl->admin_q);
+		blk_mq_wait_quiesce_done(ctrl->admin_q->tag_set);
 }
 EXPORT_SYMBOL_GPL(nvme_stop_admin_queue);
 

drivers/scsi/scsi_lib.c

@@ -2735,7 +2735,7 @@ static void scsi_stop_queue(struct scsi_device *sdev, bool nowait)
 			blk_mq_quiesce_queue(sdev->request_queue);
 	} else {
 		if (!nowait)
-			blk_mq_wait_quiesce_done(sdev->request_queue);
+			blk_mq_wait_quiesce_done(sdev->request_queue->tag_set);
 	}
 }

include/linux/blk-mq.h

@@ -880,7 +880,7 @@ void blk_mq_start_hw_queues(struct request_queue *q);
 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_quiesce_queue(struct request_queue *q);
-void blk_mq_wait_quiesce_done(struct request_queue *q);
+void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set);
 void blk_mq_unquiesce_queue(struct request_queue *q);
 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);