blk-mq: Drop busy_iter_fn blk_mq_hw_ctx argument
The only user of the busy_iter_fn blk_mq_hw_ctx argument is blk_mq_rq_inflight(). Function blk_mq_rq_inflight() uses the hctx to find the associated request queue to match against the request. However, this same check is already done in its caller, bt_iter(), so drop the check. With that change there are no more users of the busy_iter_fn blk_mq_hw_ctx argument, so drop the argument.

Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: John Garry <john.garry@huawei.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Tested-by: Kashyap Desai <kashyap.desai@broadcom.com>
Link: https://lore.kernel.org/r/1638794990-137490-2-git-send-email-john.garry@huawei.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 8ab30a3319
parent 73f3760edd
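For reference, a minimal sketch (not part of this commit; the function name and counting logic are illustrative only) of a callback written against the new three-argument busy_iter_fn / busy_tag_iter_fn shape:

#include <linux/blk-mq.h>

/*
 * Illustrative only: a callback matching the new busy_iter_fn /
 * busy_tag_iter_fn signature. It no longer takes a struct blk_mq_hw_ctx
 * pointer, because the iterator (bt_iter()) already restricts the walk
 * to requests belonging to the hctx being iterated.
 */
static bool example_count_started(struct request *rq, void *priv,
                                  bool reserved)
{
        unsigned int *count = priv;     /* caller-provided cookie */

        if (blk_mq_request_started(rq))
                (*count)++;

        return true;    /* true means continue the iteration */
}

Any per-hctx state a callback previously derived from the dropped argument can instead be passed through the priv cookie or, as in blk_mq_rq_inflight() below, left to the iterator's own queue/hctx matching.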
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -254,7 +254,7 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 		return true;
 
 	if (rq->q == hctx->queue && rq->mq_hctx == hctx)
-		ret = iter_data->fn(hctx, rq, iter_data->data, reserved);
+		ret = iter_data->fn(rq, iter_data->data, reserved);
 	blk_mq_put_rq_ref(rq);
 	return ret;
 }
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -127,8 +127,7 @@ struct mq_inflight {
 	unsigned int inflight[2];
 };
 
-static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
-				  struct request *rq, void *priv,
+static bool blk_mq_check_inflight(struct request *rq, void *priv,
 				  bool reserved)
 {
 	struct mq_inflight *mi = priv;
@@ -1308,14 +1307,15 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q,
 }
 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
 
-static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
-			       void *priv, bool reserved)
+static bool blk_mq_rq_inflight(struct request *rq, void *priv,
+			       bool reserved)
 {
 	/*
-	 * If we find a request that isn't idle and the queue matches,
-	 * we know the queue is busy. Return false to stop the iteration.
+	 * If we find a request that isn't idle we know the queue is busy
+	 * as it's checked in the iter.
+	 * Return false to stop the iteration.
 	 */
-	if (blk_mq_request_started(rq) && rq->q == hctx->queue) {
+	if (blk_mq_request_started(rq)) {
 		bool *busy = priv;
 
 		*busy = true;
@@ -1377,8 +1377,7 @@ void blk_mq_put_rq_ref(struct request *rq)
 		__blk_mq_free_request(rq);
 }
 
-static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
-				 struct request *rq, void *priv, bool reserved)
+static bool blk_mq_check_expired(struct request *rq, void *priv, bool reserved)
 {
 	unsigned long *next = priv;
 
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -470,8 +470,7 @@ struct blk_mq_queue_data {
 	bool last;
 };
 
-typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
-			    bool);
+typedef bool (busy_iter_fn)(struct request *, void *, bool);
 typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
 
 /**