block: Provide blk_mq_sched_get_icq()
Currently we look up the ICQ only after the request is allocated. However, BFQ will want to decide how many scheduler tags it allows a given bfq queue (effectively a process) to consume based on cgroup weight. So provide a function blk_mq_sched_get_icq() so that BFQ can look up the ICQ earlier.

Acked-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20211125133645.27483-1-jack@suse.cz
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 790cf9c848
parent 639d353143
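For illustration only (not part of this patch), a minimal sketch of how an I/O scheduler's ->limit_depth() hook could use the new helper to reach the submitting process' icq before a scheduler tag is allocated; example_limit_depth() and example_depth_for() are hypothetical names, only blk_mq_sched_get_icq() is introduced by this change:

/*
 * Illustrative sketch, not from this patch: look up the per-process icq
 * at tag allocation time and shrink the allowed scheduler tag depth
 * based on scheduler-specific per-icq state.
 */
static void example_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
        struct io_cq *icq = blk_mq_sched_get_icq(data->q);

        if (!icq)
                return;         /* no io_context: leave the default depth */

        /* example_depth_for() stands in for a scheduler-specific policy */
        data->shallow_depth = example_depth_for(icq);
}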
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -18,9 +18,8 @@
 #include "blk-mq-tag.h"
 #include "blk-wbt.h"
 
-void blk_mq_sched_assign_ioc(struct request *rq)
+struct io_cq *blk_mq_sched_get_icq(struct request_queue *q)
 {
-        struct request_queue *q = rq->q;
         struct io_context *ioc;
         struct io_cq *icq;
 
@@ -28,22 +27,27 @@ void blk_mq_sched_assign_ioc(struct request *rq)
         if (unlikely(!current->io_context))
                 create_task_io_context(current, GFP_ATOMIC, q->node);
 
-        /*
-         * May not have an IO context if it's a passthrough request
-         */
+        /* May not have an IO context if context creation failed */
         ioc = current->io_context;
         if (!ioc)
-                return;
+                return NULL;
 
         spin_lock_irq(&q->queue_lock);
         icq = ioc_lookup_icq(ioc, q);
         spin_unlock_irq(&q->queue_lock);
+        if (icq)
+                return icq;
+        return ioc_create_icq(ioc, q, GFP_ATOMIC);
+}
+EXPORT_SYMBOL(blk_mq_sched_get_icq);
 
-        if (!icq) {
-                icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
-                if (!icq)
-                        return;
-        }
+void blk_mq_sched_assign_ioc(struct request *rq)
+{
+        struct io_cq *icq;
+
+        icq = blk_mq_sched_get_icq(rq->q);
+        if (!icq)
+                return;
         get_io_context(icq->ioc);
         rq->elv.icq = icq;
 }
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -8,6 +8,7 @@
 
 #define MAX_SCHED_RQ (16 * BLKDEV_DEFAULT_RQ)
 
+struct io_cq *blk_mq_sched_get_icq(struct request_queue *q);
 void blk_mq_sched_assign_ioc(struct request *rq);
 
 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,