block: fix ordering between checking BLK_MQ_S_STOPPED and request adding
Suppose a first scenario with a virtio_blk driver.

CPU0                                    CPU1

blk_mq_try_issue_directly()
  __blk_mq_issue_directly()
    q->mq_ops->queue_rq()
      virtio_queue_rq()
        blk_mq_stop_hw_queue()
                                        virtblk_done()
  blk_mq_request_bypass_insert()  1) store
                                          blk_mq_start_stopped_hw_queue()
                                            clear_bit(BLK_MQ_S_STOPPED)   3) store
                                            blk_mq_run_hw_queue()
                                              if (!blk_mq_hctx_has_pending())   4) load
                                                return
                                              blk_mq_sched_dispatch_requests()
  blk_mq_run_hw_queue()
    if (!blk_mq_hctx_has_pending())
      return
    blk_mq_sched_dispatch_requests()
      if (blk_mq_hctx_stopped())   2) load
        return
      __blk_mq_sched_dispatch_requests()
Suppose another scenario.

CPU0                                    CPU1

blk_mq_requeue_work()
  blk_mq_insert_request()  1) store
                                        virtblk_done()
                                          blk_mq_start_stopped_hw_queue()
  blk_mq_run_hw_queues()                    clear_bit(BLK_MQ_S_STOPPED)   3) store
                                            blk_mq_run_hw_queue()
                                              if (!blk_mq_hctx_has_pending())   4) load
                                                return
                                              blk_mq_sched_dispatch_requests()
    if (blk_mq_hctx_stopped())   2) load
      continue
    blk_mq_run_hw_queue()
Both scenarios are similar: a full memory barrier has to be inserted between
1) and 2), as well as between 3) and 4), to guarantee that either CPU0 sees
that BLK_MQ_S_STOPPED has been cleared or CPU1 sees the request on the
dispatch list. Otherwise neither CPU reruns the hardware queue and the
request is starved.
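The required pairing is the classic store-buffering pattern. Below is a
minimal userspace C model of the broken ordering, a sketch under the
assumption that C11 relaxed atomics adequately stand in for the kernel's
plain accesses; dispatch_pending, queue_stopped and the cpu*_sees_* helpers
are invented names for illustration, not blk-mq code. Without a barrier
between the store and the load on each side, both loads may observe the
stale values.

/*
 * Userspace model of the race (an illustrative sketch, not kernel code):
 * dispatch_pending stands in for the dispatch list, queue_stopped for
 * BLK_MQ_S_STOPPED.  Build with: cc -O2 -pthread sb_model.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool dispatch_pending;		/* request on the dispatch list */
static atomic_bool queue_stopped = true;	/* BLK_MQ_S_STOPPED is set */

/* CPU0: insert the request, then check whether the queue is stopped. */
static bool cpu0_sees_stopped(void)
{
	atomic_store_explicit(&dispatch_pending, true,
			      memory_order_relaxed);		/* 1) store */
	return atomic_load_explicit(&queue_stopped,
				    memory_order_relaxed);	/* 2) load */
}

/* CPU1: clear the stopped flag, then check for pending work. */
static bool cpu1_sees_pending(void)
{
	atomic_store_explicit(&queue_stopped, false,
			      memory_order_relaxed);		/* 3) store */
	return atomic_load_explicit(&dispatch_pending,
				    memory_order_relaxed);	/* 4) load */
}

static bool r0, r1;
static void *t0(void *arg) { r0 = cpu0_sees_stopped(); return NULL; }
static void *t1(void *arg) { r1 = cpu1_sees_pending(); return NULL; }

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, t0, NULL);
	pthread_create(&b, NULL, t1, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/*
	 * "stopped=1 pending=0" is the lost-wakeup outcome: CPU0 bails out
	 * because the queue looks stopped, CPU1 bails out because nothing
	 * looks pending, and the request is never dispatched.
	 */
	printf("stopped=%d pending=%d\n", r0, r1);
	return 0;
}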
The straightforward fix is to add the required full memory barrier to the
blk_mq_hctx_stopped() helper. To avoid penalizing the fast path (the
hardware queue is not stopped most of the time), the barrier is inserted
only into the slow path; indeed, only the slow path has to care about a
missed dispatch of the request to the low-level device driver.
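Continuing the model above (still an illustrative sketch, not the kernel
code itself), the patched ordering can be mimicked with C11 fences: the
reader side pays for a full fence only on the slow path, mirroring the
smp_mb() added to blk_mq_hctx_stopped(), and the writer side issues a fence
after clearing the flag, mirroring the smp_mb__after_atomic() added to
blk_mq_start_stopped_hw_queue().

/* Reader side, shaped like blk_mq_hctx_stopped() after the patch. */
static bool cpu0_sees_stopped_fixed(void)
{
	atomic_store_explicit(&dispatch_pending, true,
			      memory_order_relaxed);		/* 1) store */

	/* Fast path: the queue is not stopped most of the time. */
	if (!atomic_load_explicit(&queue_stopped, memory_order_relaxed))
		return false;

	/* Slow path: full fence, the analogue of smp_mb(). */
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load_explicit(&queue_stopped,
				    memory_order_relaxed);	/* 2) load */
}

/* Writer side, shaped like blk_mq_start_stopped_hw_queue() after the patch. */
static bool cpu1_sees_pending_fixed(void)
{
	atomic_store_explicit(&queue_stopped, false,
			      memory_order_relaxed);		/* 3) store */

	/* The analogue of smp_mb__after_atomic() after clear_bit(). */
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load_explicit(&dispatch_pending,
				    memory_order_relaxed);	/* 4) load */
}

With both fences in place the lost-wakeup combination can no longer happen:
either CPU0 already finds the queue not stopped and dispatches itself, or
the paired fences guarantee that at least one side observes the other's
store and goes on to run the hardware queue.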
Fixes: 320ae51fee ("blk-mq: new multi-queue block IO queueing mechanism")
Cc: stable@vger.kernel.org
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20241014092934.53630-4-songmuchun@bytedance.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6bda857bcb
commit 96a9fe64bf
block/blk-mq.c
@@ -2438,6 +2438,12 @@ void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 		return;
 
 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
+	/*
+	 * Pairs with the smp_mb() in blk_mq_hctx_stopped() to order the
+	 * clearing of BLK_MQ_S_STOPPED above and the checking of dispatch
+	 * list in the subsequent routine.
+	 */
+	smp_mb__after_atomic();
 	blk_mq_run_hw_queue(hctx, async);
 }
 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
block/blk-mq.h
@@ -230,6 +230,19 @@ static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data
 
 static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
 {
+	/* Fast path: hardware queue is not stopped most of the time. */
+	if (likely(!test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
+		return false;
+
+	/*
+	 * This barrier is used to order adding of dispatch list before and
+	 * the test of BLK_MQ_S_STOPPED below. Pairs with the memory barrier
+	 * in blk_mq_start_stopped_hw_queue() so that dispatch code could
+	 * either see BLK_MQ_S_STOPPED is cleared or dispatch list is not
+	 * empty to avoid missing dispatching requests.
+	 */
+	smp_mb();
+
 	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
 }
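For context, both scenarios rely on the usual driver stop/restart pattern
around a full device ring. The following is a condensed sketch loosely
modeled on virtio_blk; struct my_dev, my_ring_full() and the function names
are invented for illustration and are not the real driver code.

#include <linux/blk-mq.h>

/* Illustrative device state, not the real virtio_blk structures. */
struct my_dev {
	struct request_queue *q;
};

/* Placeholder for a real "no free descriptors" check. */
static bool my_ring_full(struct my_dev *dev)
{
	return false;
}

/* ->queue_rq(): stop the hardware queue when the device ring is full. */
static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct my_dev *dev = hctx->queue->queuedata;

	if (my_ring_full(dev)) {
		blk_mq_stop_hw_queue(hctx);	/* sets BLK_MQ_S_STOPPED */
		/*
		 * The block layer re-inserts bd->rq onto the dispatch list
		 * (scenario 1 above: blk_mq_request_bypass_insert()).
		 */
		return BLK_STS_DEV_RESOURCE;
	}

	/* ... hand bd->rq to the hardware ... */
	return BLK_STS_OK;
}

/* Completion path: restart stopped queues once descriptors are freed. */
static void my_complete_work(struct my_dev *dev)
{
	/*
	 * clear_bit(BLK_MQ_S_STOPPED) plus smp_mb__after_atomic() inside
	 * this helper pairs with the smp_mb() in blk_mq_hctx_stopped() on
	 * the inserting CPU, so at least one side reruns the queue.
	 */
	blk_mq_start_stopped_hw_queues(dev->q, true);
}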