diff --git a/block/blk-core.c b/block/blk-core.c
index 09d10bb95fda..4f791a3114a1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -287,7 +287,7 @@ bool blk_queue_start_drain(struct request_queue *q)
 	 * entering queue, so we call blk_freeze_queue_start() to
 	 * prevent I/O from crossing blk_queue_enter().
 	 */
-	bool freeze = __blk_freeze_queue_start(q);
+	bool freeze = __blk_freeze_queue_start(q, current);
 	if (queue_is_mq(q))
 		blk_mq_wake_waiters(q);
 	/* Make blk_queue_enter() reexamine the DYING flag. */
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5f4496220432..5e240a4b6be0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -120,20 +120,66 @@ void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
 	inflight[1] = mi.inflight[1];
 }
 
-bool __blk_freeze_queue_start(struct request_queue *q)
+#ifdef CONFIG_LOCKDEP
+static bool blk_freeze_set_owner(struct request_queue *q,
+				 struct task_struct *owner)
 {
-	int freeze;
+	if (!owner)
+		return false;
+
+	if (!q->mq_freeze_depth) {
+		q->mq_freeze_owner = owner;
+		q->mq_freeze_owner_depth = 1;
+		return true;
+	}
+
+	if (owner == q->mq_freeze_owner)
+		q->mq_freeze_owner_depth += 1;
+	return false;
+}
+
+/* verify the last unfreeze in owner context */
+static bool blk_unfreeze_check_owner(struct request_queue *q)
+{
+	if (!q->mq_freeze_owner)
+		return false;
+	if (q->mq_freeze_owner != current)
+		return false;
+	if (--q->mq_freeze_owner_depth == 0) {
+		q->mq_freeze_owner = NULL;
+		return true;
+	}
+	return false;
+}
+
+#else
+
+static bool blk_freeze_set_owner(struct request_queue *q,
+				 struct task_struct *owner)
+{
+	return false;
+}
+
+static bool blk_unfreeze_check_owner(struct request_queue *q)
+{
+	return false;
+}
+#endif
+
+bool __blk_freeze_queue_start(struct request_queue *q,
+			      struct task_struct *owner)
+{
+	bool freeze;
 
 	mutex_lock(&q->mq_freeze_lock);
+	freeze = blk_freeze_set_owner(q, owner);
 	if (++q->mq_freeze_depth == 1) {
 		percpu_ref_kill(&q->q_usage_counter);
 		mutex_unlock(&q->mq_freeze_lock);
 		if (queue_is_mq(q))
 			blk_mq_run_hw_queues(q, false);
-		freeze = true;
 	} else {
 		mutex_unlock(&q->mq_freeze_lock);
-		freeze = false;
 	}
 
 	return freeze;
@@ -141,7 +187,7 @@ bool __blk_freeze_queue_start(struct request_queue *q)
 
 void blk_freeze_queue_start(struct request_queue *q)
 {
-	if (__blk_freeze_queue_start(q))
+	if (__blk_freeze_queue_start(q, current))
 		blk_freeze_acquire_lock(q, false, false);
 }
 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
@@ -170,7 +216,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
 bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
 {
-	int unfreeze = false;
+	bool unfreeze;
 
 	mutex_lock(&q->mq_freeze_lock);
 	if (force_atomic)
@@ -180,8 +226,8 @@ bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
 	if (!q->mq_freeze_depth) {
 		percpu_ref_resurrect(&q->q_usage_counter);
 		wake_up_all(&q->mq_freeze_wq);
-		unfreeze = true;
 	}
+	unfreeze = blk_unfreeze_check_owner(q);
 	mutex_unlock(&q->mq_freeze_lock);
 
 	return unfreeze;
@@ -203,7 +249,7 @@ EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
  */
 void blk_freeze_queue_start_non_owner(struct request_queue *q)
 {
-	__blk_freeze_queue_start(q);
+	__blk_freeze_queue_start(q, NULL);
 }
 EXPORT_SYMBOL_GPL(blk_freeze_queue_start_non_owner);
 
diff --git a/block/blk.h b/block/blk.h
index ac48b79cbf80..57fc035620d6 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -37,7 +37,8 @@
 void blk_free_flush_queue(struct blk_flush_queue *q);
 bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
 bool blk_queue_start_drain(struct request_queue *q);
-bool __blk_freeze_queue_start(struct request_queue *q);
+bool __blk_freeze_queue_start(struct request_queue *q,
+			      struct task_struct *owner);
 int __bio_queue_enter(struct request_queue *q, struct bio *bio);
 void submit_bio_noacct_nocheck(struct bio *bio);
 void bio_await_chain(struct bio *bio);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 93551772c1d6..1b51a7c92e9b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -575,6 +575,10 @@
 	struct throtl_data *td;
 #endif
 	struct rcu_head		rcu_head;
+#ifdef CONFIG_LOCKDEP
+	struct task_struct	*mq_freeze_owner;
+	int			mq_freeze_owner_depth;
+#endif
 	wait_queue_head_t	mq_freeze_wq;
 	/*
 	 * Protect concurrent access to q_usage_counter by
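For readers who want to see the owner/depth bookkeeping in isolation, the sketch
below is a minimal user-space model of the semantics this patch introduces. It
assumes a pthread mutex in place of q->mq_freeze_lock and a pthread_t in place
of the task_struct pointer; all names here (mock_queue, mock_freeze,
mock_unfreeze) are illustrative, not kernel API. It demonstrates that only the
first freeze by a given task returns true (the point where the caller would
acquire the lockdep map), and only that same task's last matching unfreeze
returns true (the point where it would be released).

/*
 * Stand-alone model of the mq_freeze_owner / mq_freeze_owner_depth
 * tracking added by this patch.  Build with: cc -pthread sketch.c
 * Hypothetical names throughout; not the kernel implementation.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct mock_queue {
	pthread_mutex_t freeze_lock;	/* stands in for q->mq_freeze_lock */
	int freeze_depth;		/* stands in for q->mq_freeze_depth */
	pthread_t owner;		/* stands in for q->mq_freeze_owner */
	bool owner_valid;		/* pthread_t has no NULL, so track validity */
	int owner_depth;		/* stands in for q->mq_freeze_owner_depth */
};

/* Returns true only for the first freeze by a given owner task. */
static bool mock_freeze(struct mock_queue *q)
{
	bool first = false;

	pthread_mutex_lock(&q->freeze_lock);
	if (!q->freeze_depth) {
		q->owner = pthread_self();
		q->owner_valid = true;
		q->owner_depth = 1;
		first = true;
	} else if (q->owner_valid && pthread_equal(q->owner, pthread_self())) {
		/* nested freeze by the owner: count it, don't re-acquire */
		q->owner_depth++;
	}
	q->freeze_depth++;
	pthread_mutex_unlock(&q->freeze_lock);
	return first;
}

/* Returns true only for the owning task's last unfreeze. */
static bool mock_unfreeze(struct mock_queue *q)
{
	bool last = false;

	pthread_mutex_lock(&q->freeze_lock);
	q->freeze_depth--;
	if (q->owner_valid && pthread_equal(q->owner, pthread_self()) &&
	    --q->owner_depth == 0) {
		q->owner_valid = false;
		last = true;
	}
	pthread_mutex_unlock(&q->freeze_lock);
	return last;
}

int main(void)
{
	struct mock_queue q = { .freeze_lock = PTHREAD_MUTEX_INITIALIZER };

	printf("freeze   -> %d (expect 1)\n", mock_freeze(&q));   /* first */
	printf("freeze   -> %d (expect 0)\n", mock_freeze(&q));   /* nested */
	printf("unfreeze -> %d (expect 0)\n", mock_unfreeze(&q)); /* inner */
	printf("unfreeze -> %d (expect 1)\n", mock_unfreeze(&q)); /* last */
	return 0;
}

The depth counter is what allows freezes to nest: recursive freezes by the
owner are counted rather than re-acquiring the lockdep map, and an unfreeze
from a non-owner context (the NULL-owner path used by
blk_freeze_queue_start_non_owner() in the patch) never releases it.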