blk-mq: allow software queue to map to multiple hardware queues
The mapping used to be dependent on just the CPU location, but now it's a
tuple of (type, cpu) instead. This is a prep patch for allowing a single
software queue to map to multiple hardware queues. No functional changes
in this patch.

This changes the software queue count to an unsigned short to save a bit
of space. We can still support 64K-1 CPUs, which should be enough. Add a
check to catch a wrap.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit f31967f0e4 (parent f9afca4d36)
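For readers skimming the diff, the core change is that each per-CPU software queue now records one hardware-queue slot per queue type instead of a single index, and the 16-bit counters are checked for wrap. The following is a minimal, stand-alone C sketch of that data-structure idea; the names used here (MY_HCTX_MAX_TYPES, MAX_CTX, struct sw_ctx, struct hw_ctx, map_sw_to_hw) are illustrative stand-ins, not the kernel's actual identifiers.

/*
 * User-space sketch (not kernel code) of the mapping introduced by this
 * patch: a software (per-CPU) queue keeps one hardware-queue index per
 * queue type, so the same software queue can map to several hardware
 * queues. All names are hypothetical stand-ins for the kernel's types.
 */
#include <assert.h>
#include <stdio.h>

#define MY_HCTX_MAX_TYPES 3	/* e.g. default, read, poll */
#define MAX_CTX		  4	/* sw queues per hw queue, demo only */

struct sw_ctx {
	unsigned int   cpu;
	/* was a single index; now one slot per hardware queue type */
	unsigned short index_hw[MY_HCTX_MAX_TYPES];
};

struct hw_ctx {
	unsigned short type;	/* which type this hardware queue serves */
	unsigned short nr_ctx;	/* 16 bits: up to 64K-1 software queues */
	struct sw_ctx *ctxs[MAX_CTX];
};

/* Attach a software queue to a hardware queue of a given type. */
static void map_sw_to_hw(struct sw_ctx *ctx, struct hw_ctx *hctx)
{
	ctx->index_hw[hctx->type] = hctx->nr_ctx;
	hctx->ctxs[hctx->nr_ctx++] = ctx;
	assert(hctx->nr_ctx);	/* catch a 16-bit wrap, like the BUG_ON */
}

int main(void)
{
	struct sw_ctx cpu0 = { .cpu = 0 };
	struct hw_ctx hw_default = { .type = 0 };
	struct hw_ctx hw_poll    = { .type = 2 };

	/* The same software queue now maps into two hardware queues. */
	map_sw_to_hw(&cpu0, &hw_default);
	map_sw_to_hw(&cpu0, &hw_poll);

	printf("cpu0: default slot %hu, poll slot %hu\n",
	       cpu0.index_hw[hw_default.type], cpu0.index_hw[hw_poll.type]);
	return 0;
}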
@@ -109,7 +109,7 @@ static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
 static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
 					   struct blk_mq_ctx *ctx)
 {
-	unsigned idx = ctx->index_hw;
+	unsigned short idx = ctx->index_hw[hctx->type];
 
 	if (++idx == hctx->nr_ctx)
 		idx = 0;
@@ -75,14 +75,18 @@ static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
 				     struct blk_mq_ctx *ctx)
 {
-	if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
-		sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
+	const int bit = ctx->index_hw[hctx->type];
+
+	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
+		sbitmap_set_bit(&hctx->ctx_map, bit);
 }
 
 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
 				      struct blk_mq_ctx *ctx)
 {
-	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
+	const int bit = ctx->index_hw[hctx->type];
+
+	sbitmap_clear_bit(&hctx->ctx_map, bit);
 }
 
 struct mq_inflight {
@@ -955,7 +959,7 @@ static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
 					struct blk_mq_ctx *start)
 {
-	unsigned off = start ? start->index_hw : 0;
+	unsigned off = start ? start->index_hw[hctx->type] : 0;
 	struct dispatch_rq_data data = {
 		.hctx = hctx,
 		.rq   = NULL,
@@ -2343,10 +2347,16 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 
 		ctx = per_cpu_ptr(q->queue_ctx, i);
 		hctx = blk_mq_map_queue_type(q, 0, i);
-
+		hctx->type = 0;
 		cpumask_set_cpu(i, hctx->cpumask);
-		ctx->index_hw = hctx->nr_ctx;
+		ctx->index_hw[hctx->type] = hctx->nr_ctx;
 		hctx->ctxs[hctx->nr_ctx++] = ctx;
+
+		/*
+		 * If the nr_ctx type overflows, we have exceeded the
+		 * amount of sw queues we can support.
+		 */
+		BUG_ON(!hctx->nr_ctx);
 	}
 
 	mutex_unlock(&q->sysfs_lock);
@@ -17,7 +17,7 @@ struct blk_mq_ctx {
 	} ____cacheline_aligned_in_smp;
 
 	unsigned int		cpu;
-	unsigned int		index_hw;
+	unsigned short		index_hw[HCTX_MAX_TYPES];
 
 	/* incremented at dispatch time */
 	unsigned long		rq_dispatched[2];
@@ -576,7 +576,7 @@ static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
 {
 	struct kyber_hctx_data *khd = hctx->sched_data;
 	struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
-	struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw];
+	struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
 	unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
 	struct list_head *rq_list = &kcq->rq_list[sched_domain];
 	bool merged;
@@ -602,7 +602,7 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
 
 	list_for_each_entry_safe(rq, next, rq_list, queuelist) {
 		unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
-		struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw];
+		struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
 		struct list_head *head = &kcq->rq_list[sched_domain];
 
 		spin_lock(&kcq->lock);
@@ -611,7 +611,7 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
 		else
 			list_move_tail(&rq->queuelist, head);
 		sbitmap_set_bit(&khd->kcq_map[sched_domain],
-				rq->mq_ctx->index_hw);
+				rq->mq_ctx->index_hw[hctx->type]);
 		blk_mq_sched_request_inserted(rq);
 		spin_unlock(&kcq->lock);
 	}
@@ -37,7 +37,8 @@ struct blk_mq_hw_ctx {
 	struct blk_mq_ctx	*dispatch_from;
 	unsigned int		dispatch_busy;
 
-	unsigned int		nr_ctx;
+	unsigned short		type;
+	unsigned short		nr_ctx;
 	struct blk_mq_ctx	**ctxs;
 
 	spinlock_t		dispatch_wait_lock;