blk-mq: blk_mq_tag_busy does not need to return a value

Currently the return value of blk_mq_tag_busy() has no effect, so change
it to return void. Some of the code has also been adjusted to improve
readability.

Signed-off-by: Liu Song <liusong@linux.alibaba.com>
Link: https://lore.kernel.org/r/1656170121-1619-1-git-send-email-liusong@linux.alibaba.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Liu Song 2022-06-25 23:15:21 +08:00 committed by Jens Axboe
parent a78418e6a0
commit ee78ec1077
2 changed files with 11 additions and 17 deletions

View File

@@ -37,29 +37,25 @@ static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
 * to get tag when first time, the other shared-tag users could reserve
 * budget for it.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{ {
unsigned int users; unsigned int users;
if (blk_mq_is_shared_tags(hctx->flags)) { if (blk_mq_is_shared_tags(hctx->flags)) {
struct request_queue *q = hctx->queue; struct request_queue *q = hctx->queue;
if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) || if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) { return;
return true; set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags);
}
} else { } else {
if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) || if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) { return;
return true; set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state);
}
} }
users = atomic_inc_return(&hctx->tags->active_queues); users = atomic_inc_return(&hctx->tags->active_queues);
blk_mq_update_wake_batch(hctx->tags, users); blk_mq_update_wake_batch(hctx->tags, users);
return true;
} }
/*

View File

@@ -47,15 +47,13 @@ enum {
BLK_MQ_TAG_MAX = BLK_MQ_NO_TAG - 1, BLK_MQ_TAG_MAX = BLK_MQ_NO_TAG - 1,
}; };
extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *); extern void __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *); extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);
static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{ {
if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
return false; __blk_mq_tag_busy(hctx);
return __blk_mq_tag_busy(hctx);
} }
static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)