blk-mq: Change shared sbitmap naming to shared tags
Now that shared sbitmap support really means shared tags, rename symbols to match that.

Signed-off-by: John Garry <john.garry@huawei.com>
Link: https://lore.kernel.org/r/1633429419-228500-15-git-send-email-john.garry@huawei.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ae0f1a732f
commit 079a2e3e86
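The rename is mechanical: every hunk below rewrites the same two-way branch between one tag set shared by all hardware queues and one tag set per hardware queue. For orientation, here is a minimal standalone C sketch of that branch; the struct layouts and the flag value are illustrative stand-ins (the real definitions live in block/blk-mq.h and include/linux/blk-mq.h), only the names follow the patch.

#include <stdbool.h>

/* Illustrative stand-ins; the real flag value and structs are kernel-internal. */
#define BLK_MQ_F_TAG_HCTX_SHARED (1U << 3)

struct blk_mq_tags { int unused; };

struct blk_mq_tag_set {
        unsigned int flags;
        struct blk_mq_tags *shared_tags;   /* was: shared_sbitmap_tags */
        struct blk_mq_tags **tags;         /* one entry per hardware queue */
};

/* Renamed from blk_mq_is_sbitmap_shared(); the test itself is unchanged. */
static bool blk_mq_is_shared_tags(unsigned int flags)
{
        return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

/* The branch this patch renames throughout: shared vs. per-hctx tags. */
static struct blk_mq_tags *pick_driver_tags(struct blk_mq_tag_set *set,
                                            unsigned int hctx_idx)
{
        if (blk_mq_is_shared_tags(set->flags))
                return set->shared_tags;
        return set->tags[hctx_idx];
}

Note that the queue-side pointer also gets a sharper name: request_queue->shared_sbitmap_tags becomes sched_shared_tags (it holds the scheduler's shared tags), while the driver's shared tags live on the tag set as shared_tags.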
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -554,7 +554,7 @@ struct request_queue *blk_alloc_queue(int node_id)
 
        q->node = node_id;
 
-       atomic_set(&q->nr_active_requests_shared_sbitmap, 0);
+       atomic_set(&q->nr_active_requests_shared_tags, 0);
 
        timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
        INIT_WORK(&q->timeout_work, blk_timeout_work);
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -519,8 +519,8 @@ static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
                                          struct blk_mq_hw_ctx *hctx,
                                          unsigned int hctx_idx)
 {
-       if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) {
-               hctx->sched_tags = q->shared_sbitmap_tags;
+       if (blk_mq_is_shared_tags(q->tag_set->flags)) {
+               hctx->sched_tags = q->sched_shared_tags;
                return 0;
        }
 
@@ -532,10 +532,10 @@ static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
        return 0;
 }
 
-static void blk_mq_exit_sched_shared_sbitmap(struct request_queue *queue)
+static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
 {
-       blk_mq_free_rq_map(queue->shared_sbitmap_tags);
-       queue->shared_sbitmap_tags = NULL;
+       blk_mq_free_rq_map(queue->sched_shared_tags);
+       queue->sched_shared_tags = NULL;
 }
 
 /* called in queue's release handler, tagset has gone away */
@@ -546,17 +546,17 @@ static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
 
        queue_for_each_hw_ctx(q, hctx, i) {
                if (hctx->sched_tags) {
-                       if (!blk_mq_is_sbitmap_shared(q->tag_set->flags))
+                       if (!blk_mq_is_shared_tags(q->tag_set->flags))
                                blk_mq_free_rq_map(hctx->sched_tags);
                        hctx->sched_tags = NULL;
                }
        }
 
-       if (blk_mq_is_sbitmap_shared(flags))
-               blk_mq_exit_sched_shared_sbitmap(q);
+       if (blk_mq_is_shared_tags(flags))
+               blk_mq_exit_sched_shared_tags(q);
 }
 
-static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue)
+static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
 {
        struct blk_mq_tag_set *set = queue->tag_set;
 
@@ -564,13 +564,13 @@ static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue)
         * Set initial depth at max so that we don't need to reallocate for
         * updating nr_requests.
         */
-       queue->shared_sbitmap_tags = blk_mq_alloc_map_and_rqs(set,
+       queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set,
                                                BLK_MQ_NO_HCTX_IDX,
                                                MAX_SCHED_RQ);
-       if (!queue->shared_sbitmap_tags)
+       if (!queue->sched_shared_tags)
                return -ENOMEM;
 
-       blk_mq_tag_update_sched_shared_sbitmap(queue);
+       blk_mq_tag_update_sched_shared_tags(queue);
 
        return 0;
 }
@@ -596,8 +596,8 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
        q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
                                   BLKDEV_DEFAULT_RQ);
 
-       if (blk_mq_is_sbitmap_shared(flags)) {
-               ret = blk_mq_init_sched_shared_sbitmap(q);
+       if (blk_mq_is_shared_tags(flags)) {
+               ret = blk_mq_init_sched_shared_tags(q);
                if (ret)
                        return ret;
        }
@@ -647,8 +647,8 @@ void blk_mq_sched_free_rqs(struct request_queue *q)
        struct blk_mq_hw_ctx *hctx;
        int i;
 
-       if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) {
-               blk_mq_free_rqs(q->tag_set, q->shared_sbitmap_tags,
+       if (blk_mq_is_shared_tags(q->tag_set->flags)) {
+               blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
                                BLK_MQ_NO_HCTX_IDX);
        } else {
                queue_for_each_hw_ctx(q, hctx, i) {
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -24,7 +24,7 @@
  */
 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
-       if (blk_mq_is_sbitmap_shared(hctx->flags)) {
+       if (blk_mq_is_shared_tags(hctx->flags)) {
                struct request_queue *q = hctx->queue;
 
                if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) &&
@@ -57,19 +57,19 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 {
        struct blk_mq_tags *tags = hctx->tags;
 
-       if (blk_mq_is_sbitmap_shared(hctx->flags)) {
+       if (blk_mq_is_shared_tags(hctx->flags)) {
                struct request_queue *q = hctx->queue;
 
                if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
                                        &q->queue_flags))
                        return;
-               atomic_dec(&tags->active_queues);
        } else {
                if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                        return;
-               atomic_dec(&tags->active_queues);
        }
 
+       atomic_dec(&tags->active_queues);
+
        blk_mq_tag_wakeup_all(tags, false);
 }
 
@@ -557,7 +557,7 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                 * Only the sbitmap needs resizing since we allocated the max
                 * initially.
                 */
-               if (blk_mq_is_sbitmap_shared(set->flags))
+               if (blk_mq_is_shared_tags(set->flags))
                        return 0;
 
                new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
@@ -578,16 +578,16 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
        return 0;
 }
 
-void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int size)
+void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size)
 {
-       struct blk_mq_tags *tags = set->shared_sbitmap_tags;
+       struct blk_mq_tags *tags = set->shared_tags;
 
        sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
 }
 
-void blk_mq_tag_update_sched_shared_sbitmap(struct request_queue *q)
+void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
 {
-       sbitmap_queue_resize(&q->shared_sbitmap_tags->bitmap_tags,
+       sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
                             q->nr_requests - q->tag_set->reserved_tags);
 }
 
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -43,9 +43,9 @@ extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
 extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_tags **tags,
                                        unsigned int depth, bool can_grow);
-extern void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set,
+extern void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
                                             unsigned int size);
-extern void blk_mq_tag_update_sched_shared_sbitmap(struct request_queue *q);
+extern void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);
 
 extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2235,7 +2235,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
                blk_insert_flush(rq);
                blk_mq_run_hw_queue(data.hctx, true);
        } else if (plug && (q->nr_hw_queues == 1 ||
-                  blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) ||
+                  blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
                   q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
                /*
                 * Use plugging if we have a ->commit_rqs() hook as well, as
@@ -2353,8 +2353,8 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
        struct blk_mq_tags *drv_tags;
        struct page *page;
 
-       if (blk_mq_is_sbitmap_shared(set->flags))
-               drv_tags = set->shared_sbitmap_tags;
+       if (blk_mq_is_shared_tags(set->flags))
+               drv_tags = set->shared_tags;
        else
                drv_tags = set->tags[hctx_idx];
 
@@ -2883,8 +2883,8 @@ struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
 static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
                                       int hctx_idx)
 {
-       if (blk_mq_is_sbitmap_shared(set->flags)) {
-               set->tags[hctx_idx] = set->shared_sbitmap_tags;
+       if (blk_mq_is_shared_tags(set->flags)) {
+               set->tags[hctx_idx] = set->shared_tags;
 
                return true;
        }
@@ -2908,7 +2908,7 @@ void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
 static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
                                      unsigned int hctx_idx)
 {
-       if (!blk_mq_is_sbitmap_shared(set->flags))
+       if (!blk_mq_is_shared_tags(set->flags))
                blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx);
 
        set->tags[hctx_idx] = NULL;
@@ -3375,11 +3375,11 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 {
        int i;
 
-       if (blk_mq_is_sbitmap_shared(set->flags)) {
-               set->shared_sbitmap_tags = blk_mq_alloc_map_and_rqs(set,
+       if (blk_mq_is_shared_tags(set->flags)) {
+               set->shared_tags = blk_mq_alloc_map_and_rqs(set,
                                                BLK_MQ_NO_HCTX_IDX,
                                                set->queue_depth);
-               if (!set->shared_sbitmap_tags)
+               if (!set->shared_tags)
                        return -ENOMEM;
        }
 
@@ -3395,8 +3395,8 @@ out_unwind:
        while (--i >= 0)
                __blk_mq_free_map_and_rqs(set, i);
 
-       if (blk_mq_is_sbitmap_shared(set->flags)) {
-               blk_mq_free_map_and_rqs(set, set->shared_sbitmap_tags,
+       if (blk_mq_is_shared_tags(set->flags)) {
+               blk_mq_free_map_and_rqs(set, set->shared_tags,
                                        BLK_MQ_NO_HCTX_IDX);
        }
 
@@ -3617,8 +3617,8 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
        for (i = 0; i < set->nr_hw_queues; i++)
                __blk_mq_free_map_and_rqs(set, i);
 
-       if (blk_mq_is_sbitmap_shared(set->flags)) {
-               blk_mq_free_map_and_rqs(set, set->shared_sbitmap_tags,
+       if (blk_mq_is_shared_tags(set->flags)) {
+               blk_mq_free_map_and_rqs(set, set->shared_tags,
                                        BLK_MQ_NO_HCTX_IDX);
        }
 
@@ -3669,11 +3669,11 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
        }
        if (!ret) {
                q->nr_requests = nr;
-               if (blk_mq_is_sbitmap_shared(set->flags)) {
+               if (blk_mq_is_shared_tags(set->flags)) {
                        if (q->elevator)
-                               blk_mq_tag_update_sched_shared_sbitmap(q);
+                               blk_mq_tag_update_sched_shared_tags(q);
                        else
-                               blk_mq_tag_resize_shared_sbitmap(set, nr);
+                               blk_mq_tag_resize_shared_tags(set, nr);
                }
        }
 
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -157,7 +157,7 @@ struct blk_mq_alloc_data {
        struct blk_mq_hw_ctx *hctx;
 };
 
-static inline bool blk_mq_is_sbitmap_shared(unsigned int flags)
+static inline bool blk_mq_is_shared_tags(unsigned int flags)
 {
        return flags & BLK_MQ_F_TAG_HCTX_SHARED;
 }
@@ -217,24 +217,24 @@ static inline int blk_mq_get_rq_budget_token(struct request *rq)
 
 static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
 {
-       if (blk_mq_is_sbitmap_shared(hctx->flags))
-               atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap);
+       if (blk_mq_is_shared_tags(hctx->flags))
+               atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
        else
                atomic_inc(&hctx->nr_active);
 }
 
 static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
 {
-       if (blk_mq_is_sbitmap_shared(hctx->flags))
-               atomic_dec(&hctx->queue->nr_active_requests_shared_sbitmap);
+       if (blk_mq_is_shared_tags(hctx->flags))
+               atomic_dec(&hctx->queue->nr_active_requests_shared_tags);
        else
                atomic_dec(&hctx->nr_active);
 }
 
 static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
 {
-       if (blk_mq_is_sbitmap_shared(hctx->flags))
-               return atomic_read(&hctx->queue->nr_active_requests_shared_sbitmap);
+       if (blk_mq_is_shared_tags(hctx->flags))
+               return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
        return atomic_read(&hctx->nr_active);
 }
 static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
@@ -328,7 +328,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
        if (bt->sb.depth == 1)
                return true;
 
-       if (blk_mq_is_sbitmap_shared(hctx->flags)) {
+       if (blk_mq_is_shared_tags(hctx->flags)) {
                struct request_queue *q = hctx->queue;
 
                if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -637,7 +637,7 @@ static struct elevator_type *elevator_get_default(struct request_queue *q)
                return NULL;
 
        if (q->nr_hw_queues != 1 &&
-           !blk_mq_is_sbitmap_shared(q->tag_set->flags))
+           !blk_mq_is_shared_tags(q->tag_set->flags))
                return NULL;
 
        return elevator_get(q, "mq-deadline", false);
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -442,9 +442,9 @@ enum hctx_type {
  *                 tag set.
  * @tags:          Tag sets. One tag set per hardware queue. Has @nr_hw_queues
  *                 elements.
- * @shared_sbitmap_tags:
- *                 Shared sbitmap set of tags. Has @nr_hw_queues elements. If
- *                 set, shared by all @tags.
+ * @shared_tags:
+ *                 Shared set of tags. Has @nr_hw_queues elements. If set,
+ *                 shared by all @tags.
  * @tag_list_lock: Serializes tag_list accesses.
  * @tag_list:      List of the request queues that use this tag set. See also
  *                 request_queue.tag_set_list.
@@ -464,7 +464,7 @@ struct blk_mq_tag_set {
 
        struct blk_mq_tags      **tags;
 
-       struct blk_mq_tags      *shared_sbitmap_tags;
+       struct blk_mq_tags      *shared_tags;
 
        struct mutex            tag_list_lock;
        struct list_head        tag_list;
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -236,9 +236,9 @@ struct request_queue {
        struct timer_list       timeout;
        struct work_struct      timeout_work;
 
-       atomic_t                nr_active_requests_shared_sbitmap;
+       atomic_t                nr_active_requests_shared_tags;
 
-       struct blk_mq_tags      *shared_sbitmap_tags;
+       struct blk_mq_tags      *sched_shared_tags;
 
        struct list_head        icq_list;
 #ifdef CONFIG_BLK_CGROUP