mirror of
https://github.com/torvalds/linux.git
commit 9e97d2951a
After commit 923218f616 ("blk-mq: don't allocate driver tag upfront for flush rq") we no longer use the 'can_block' argument in blk_mq_sched_insert_request(). Kill it.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Added the actual commit message explaining why the argument is being removed.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
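The change itself is a one-argument trim. A sketch of the prototype before and after this commit; the "before" line is reconstructed from the commit message rather than quoted from the diff:

	/* Before (reconstructed): a trailing 'can_block' flag was accepted but no longer used. */
	void blk_mq_sched_insert_request(struct request *rq, bool at_head,
					 bool run_queue, bool async, bool can_block);

	/* After: the unused flag is gone, matching the declaration in the file below. */
	void blk_mq_sched_insert_request(struct request *rq, bool at_head,
					 bool run_queue, bool async);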
101 lines · 2.8 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H

#include "blk-mq.h"
#include "blk-mq-tag.h"
void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *));

void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio);

void blk_mq_sched_request_inserted(struct request *rq);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
			    struct request **merged_request);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async);
void blk_mq_sched_insert_requests(struct request_queue *q,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async);

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);

int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			   unsigned int hctx_idx);
void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			    unsigned int hctx_idx);

int blk_mq_sched_init(struct request_queue *q);
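/*
 * Most of the inline helpers below follow one pattern: if an I/O
 * scheduler (elevator) is attached to the queue and implements the
 * relevant mq op, dispatch to it; otherwise fall back to a safe
 * default (allow the merge, report no work, do nothing).
 */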
/* Only consult the scheduler for bios the queue is willing to merge. */
static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return false;

	return __blk_mq_sched_bio_merge(q, bio);
}

/* Let the scheduler veto merging bio into rq; merging is allowed by default. */
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
			 struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.allow_merge)
		return e->type->ops.mq.allow_merge(q, rq, bio);

	return true;
}

/* Notify the scheduler that a request it dispatched has completed. */
static inline void blk_mq_sched_completed_request(struct request *rq)
{
	struct elevator_queue *e = rq->q->elevator;

	if (e && e->type->ops.mq.completed_request)
		e->type->ops.mq.completed_request(rq);
}

/* Notify the scheduler that the driver has started executing rq. */
static inline void blk_mq_sched_started_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.started_request)
		e->type->ops.mq.started_request(rq);
}

/* Notify the scheduler that rq is being requeued for later dispatch. */
static inline void blk_mq_sched_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.requeue_request)
		e->type->ops.mq.requeue_request(rq);
}

/* With no scheduler attached there is never scheduler-internal work. */
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct elevator_queue *e = hctx->queue->elevator;

	if (e && e->type->ops.mq.has_work)
		return e->type->ops.mq.has_work(hctx);

	return false;
}

/* True if this hctx is marked for a dispatch restart (see blk_mq_sched_restart()). */
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

#endif
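For readers without the kernel tree at hand, here is a minimal user-space sketch of the guard-and-dispatch pattern the inline helpers above rely on. All types and names in it (sched_ops, scheduler, sched_has_work) are hypothetical stand-ins, not kernel API:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for struct elevator_queue and its mq ops table. */
	struct sched_ops {
		bool (*has_work)(void *hctx);
	};

	struct scheduler {
		const struct sched_ops *ops;
	};

	/* Mirrors the shape of blk_mq_sched_has_work(): call the hook only when
	 * a scheduler providing it is attached, otherwise report no work. */
	static bool sched_has_work(const struct scheduler *e, void *hctx)
	{
		if (e && e->ops && e->ops->has_work)
			return e->ops->has_work(hctx);

		return false;
	}

	static bool always_busy(void *hctx)
	{
		(void)hctx;
		return true;
	}

	int main(void)
	{
		static const struct sched_ops ops = { .has_work = always_busy };
		struct scheduler attached = { .ops = &ops };

		printf("no scheduler:   %d\n", sched_has_work(NULL, NULL));      /* 0 */
		printf("with scheduler: %d\n", sched_has_work(&attached, NULL)); /* 1 */
		return 0;
	}

The design point the pattern encodes: a queue with no elevator must keep working, so every hook is optional and each helper supplies the neutral default inline rather than forcing callers to null-check the ops table themselves.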