forked from Minki/linux
blk-mq: Drop 'reserved' arg of busy_tag_iter_fn
We no longer use the 'reserved' arg in busy_tag_iter_fn for any iter function so it may be dropped. Signed-off-by: John Garry <john.garry@huawei.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Hannes Reinecke <hare@suse.de> Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com> Reviewed-by: Sagi Grimberg <sagi@grimberg.me> #nvme Reviewed-by: Bart Van Assche <bvanassche@acm.org> Link: https://lore.kernel.org/r/1657109034-206040-6-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
1263c1929f
commit
2dd6532e95
@ -375,7 +375,7 @@ struct show_busy_params {
|
||||
* e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
|
||||
* keep iterating requests.
|
||||
*/
|
||||
static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
|
||||
static bool hctx_show_busy_rq(struct request *rq, void *data)
|
||||
{
|
||||
const struct show_busy_params *params = data;
|
||||
|
||||
|
@ -283,7 +283,7 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
|
||||
return true;
|
||||
|
||||
if (rq->q == q && (!hctx || rq->mq_hctx == hctx))
|
||||
ret = iter_data->fn(rq, iter_data->data, reserved);
|
||||
ret = iter_data->fn(rq, iter_data->data);
|
||||
blk_mq_put_rq_ref(rq);
|
||||
return ret;
|
||||
}
|
||||
@ -354,7 +354,7 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
|
||||
|
||||
if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
|
||||
blk_mq_request_started(rq))
|
||||
ret = iter_data->fn(rq, iter_data->data, reserved);
|
||||
ret = iter_data->fn(rq, iter_data->data);
|
||||
if (!iter_static_rqs)
|
||||
blk_mq_put_rq_ref(rq);
|
||||
return ret;
|
||||
@ -444,8 +444,7 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
|
||||
}
|
||||
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
|
||||
|
||||
static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
|
||||
void *data, bool reserved)
|
||||
static bool blk_mq_tagset_count_completed_rqs(struct request *rq, void *data)
|
||||
{
|
||||
unsigned *count = data;
|
||||
|
||||
|
@ -129,8 +129,7 @@ struct mq_inflight {
|
||||
unsigned int inflight[2];
|
||||
};
|
||||
|
||||
static bool blk_mq_check_inflight(struct request *rq, void *priv,
|
||||
bool reserved)
|
||||
static bool blk_mq_check_inflight(struct request *rq, void *priv)
|
||||
{
|
||||
struct mq_inflight *mi = priv;
|
||||
|
||||
@ -1400,8 +1399,7 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q,
|
||||
}
|
||||
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
|
||||
|
||||
static bool blk_mq_rq_inflight(struct request *rq, void *priv,
|
||||
bool reserved)
|
||||
static bool blk_mq_rq_inflight(struct request *rq, void *priv)
|
||||
{
|
||||
/*
|
||||
* If we find a request that isn't idle we know the queue is busy
|
||||
@ -1470,7 +1468,7 @@ void blk_mq_put_rq_ref(struct request *rq)
|
||||
__blk_mq_free_request(rq);
|
||||
}
|
||||
|
||||
static bool blk_mq_check_expired(struct request *rq, void *priv, bool reserved)
|
||||
static bool blk_mq_check_expired(struct request *rq, void *priv)
|
||||
{
|
||||
unsigned long *next = priv;
|
||||
|
||||
@ -3289,7 +3287,7 @@ struct rq_iter_data {
|
||||
bool has_rq;
|
||||
};
|
||||
|
||||
static bool blk_mq_has_request(struct request *rq, void *data, bool reserved)
|
||||
static bool blk_mq_has_request(struct request *rq, void *data)
|
||||
{
|
||||
struct rq_iter_data *iter_data = data;
|
||||
|
||||
|
@ -2441,7 +2441,7 @@ static void mtip_softirq_done_fn(struct request *rq)
|
||||
blk_mq_end_request(rq, cmd->status);
|
||||
}
|
||||
|
||||
static bool mtip_abort_cmd(struct request *req, void *data, bool reserved)
|
||||
static bool mtip_abort_cmd(struct request *req, void *data)
|
||||
{
|
||||
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
|
||||
struct driver_data *dd = data;
|
||||
@ -2454,7 +2454,7 @@ static bool mtip_abort_cmd(struct request *req, void *data, bool reserved)
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool mtip_queue_cmd(struct request *req, void *data, bool reserved)
|
||||
static bool mtip_queue_cmd(struct request *req, void *data)
|
||||
{
|
||||
struct driver_data *dd = data;
|
||||
|
||||
|
@ -879,7 +879,7 @@ static void recv_work(struct work_struct *work)
|
||||
kfree(args);
|
||||
}
|
||||
|
||||
static bool nbd_clear_req(struct request *req, void *data, bool reserved)
|
||||
static bool nbd_clear_req(struct request *req, void *data)
|
||||
{
|
||||
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
|
||||
|
||||
|
@ -1282,8 +1282,7 @@ struct srp_terminate_context {
|
||||
int scsi_result;
|
||||
};
|
||||
|
||||
static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr,
|
||||
bool reserved)
|
||||
static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr)
|
||||
{
|
||||
struct srp_terminate_context *context = context_ptr;
|
||||
struct srp_target_port *target = context->srp_target;
|
||||
|
@ -418,7 +418,7 @@ blk_status_t nvme_host_path_error(struct request *req)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nvme_host_path_error);
|
||||
|
||||
bool nvme_cancel_request(struct request *req, void *data, bool reserved)
|
||||
bool nvme_cancel_request(struct request *req, void *data)
|
||||
{
|
||||
dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
|
||||
"Cancelling I/O %d", req->tag);
|
||||
|
@ -2456,8 +2456,7 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
|
||||
* status. The done path will return the io request back to the block
|
||||
* layer with an error status.
|
||||
*/
|
||||
static bool
|
||||
nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
|
||||
static bool nvme_fc_terminate_exchange(struct request *req, void *data)
|
||||
{
|
||||
struct nvme_ctrl *nctrl = data;
|
||||
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
|
||||
|
@ -697,7 +697,7 @@ static __always_inline void nvme_complete_batch(struct io_comp_batch *iob,
|
||||
}
|
||||
|
||||
blk_status_t nvme_host_path_error(struct request *req);
|
||||
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
|
||||
bool nvme_cancel_request(struct request *req, void *data);
|
||||
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
|
||||
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
|
||||
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
|
||||
|
@ -272,7 +272,7 @@ static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem,
|
||||
q->entries = qsize;
|
||||
}
|
||||
|
||||
static bool wait_for_io_iter(struct scsi_cmnd *cmd, void *data, bool rsvd)
|
||||
static bool wait_for_io_iter(struct scsi_cmnd *cmd, void *data)
|
||||
{
|
||||
int *active = data;
|
||||
|
||||
|
@ -633,7 +633,7 @@ struct fib_count_data {
|
||||
int krlcnt;
|
||||
};
|
||||
|
||||
static bool fib_count_iter(struct scsi_cmnd *scmnd, void *data, bool reserved)
|
||||
static bool fib_count_iter(struct scsi_cmnd *scmnd, void *data)
|
||||
{
|
||||
struct fib_count_data *fib_count = data;
|
||||
|
||||
|
@ -1350,8 +1350,7 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
|
||||
return wq_work_done;
|
||||
}
|
||||
|
||||
static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data,
|
||||
bool reserved)
|
||||
static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
|
||||
{
|
||||
const int tag = scsi_cmd_to_rq(sc)->tag;
|
||||
struct fnic *fnic = data;
|
||||
@ -1548,8 +1547,7 @@ struct fnic_rport_abort_io_iter_data {
|
||||
int term_cnt;
|
||||
};
|
||||
|
||||
static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data,
|
||||
bool reserved)
|
||||
static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
|
||||
{
|
||||
struct fnic_rport_abort_io_iter_data *iter_data = data;
|
||||
struct fnic *fnic = iter_data->fnic;
|
||||
@ -2003,8 +2001,7 @@ struct fnic_pending_aborts_iter_data {
|
||||
int ret;
|
||||
};
|
||||
|
||||
static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
|
||||
void *data, bool reserved)
|
||||
static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
|
||||
{
|
||||
struct fnic_pending_aborts_iter_data *iter_data = data;
|
||||
struct fnic *fnic = iter_data->fnic;
|
||||
@ -2668,8 +2665,7 @@ call_fc_exch_mgr_reset:
|
||||
|
||||
}
|
||||
|
||||
static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data,
|
||||
bool reserved)
|
||||
static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data)
|
||||
{
|
||||
struct fnic_pending_aborts_iter_data *iter_data = data;
|
||||
struct fnic *fnic = iter_data->fnic;
|
||||
|
@ -566,8 +566,7 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
|
||||
}
|
||||
EXPORT_SYMBOL(scsi_host_get);
|
||||
|
||||
static bool scsi_host_check_in_flight(struct request *rq, void *data,
|
||||
bool reserved)
|
||||
static bool scsi_host_check_in_flight(struct request *rq, void *data)
|
||||
{
|
||||
int *count = data;
|
||||
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
|
||||
@ -662,7 +661,7 @@ void scsi_flush_work(struct Scsi_Host *shost)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(scsi_flush_work);
|
||||
|
||||
static bool complete_all_cmds_iter(struct request *rq, void *data, bool rsvd)
|
||||
static bool complete_all_cmds_iter(struct request *rq, void *data)
|
||||
{
|
||||
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
|
||||
enum scsi_host_status status = *(enum scsi_host_status *)data;
|
||||
@ -693,17 +692,16 @@ void scsi_host_complete_all_commands(struct Scsi_Host *shost,
|
||||
EXPORT_SYMBOL_GPL(scsi_host_complete_all_commands);
|
||||
|
||||
struct scsi_host_busy_iter_data {
|
||||
bool (*fn)(struct scsi_cmnd *, void *, bool);
|
||||
bool (*fn)(struct scsi_cmnd *, void *);
|
||||
void *priv;
|
||||
};
|
||||
|
||||
static bool __scsi_host_busy_iter_fn(struct request *req, void *priv,
|
||||
bool reserved)
|
||||
static bool __scsi_host_busy_iter_fn(struct request *req, void *priv)
|
||||
{
|
||||
struct scsi_host_busy_iter_data *iter_data = priv;
|
||||
struct scsi_cmnd *sc = blk_mq_rq_to_pdu(req);
|
||||
|
||||
return iter_data->fn(sc, iter_data->priv, reserved);
|
||||
return iter_data->fn(sc, iter_data->priv);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -716,7 +714,7 @@ static bool __scsi_host_busy_iter_fn(struct request *req, void *priv,
|
||||
* it has to be provided by the caller
|
||||
**/
|
||||
void scsi_host_busy_iter(struct Scsi_Host *shost,
|
||||
bool (*fn)(struct scsi_cmnd *, void *, bool),
|
||||
bool (*fn)(struct scsi_cmnd *, void *),
|
||||
void *priv)
|
||||
{
|
||||
struct scsi_host_busy_iter_data iter_data = {
|
||||
|
@ -381,14 +381,12 @@ void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
|
||||
* mpi3mr_print_scmd - print individual SCSI command
|
||||
* @rq: Block request
|
||||
* @data: Adapter instance reference
|
||||
* @reserved: N/A. Currently not used
|
||||
*
|
||||
* Print the SCSI command details if it is in LLD scope.
|
||||
*
|
||||
* Return: true always.
|
||||
*/
|
||||
static bool mpi3mr_print_scmd(struct request *rq,
|
||||
void *data, bool reserved)
|
||||
static bool mpi3mr_print_scmd(struct request *rq, void *data)
|
||||
{
|
||||
struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
|
||||
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
|
||||
@ -412,7 +410,6 @@ out:
|
||||
* mpi3mr_flush_scmd - Flush individual SCSI command
|
||||
* @rq: Block request
|
||||
* @data: Adapter instance reference
|
||||
* @reserved: N/A. Currently not used
|
||||
*
|
||||
* Return the SCSI command to the upper layers if it is in LLD
|
||||
* scope.
|
||||
@ -420,8 +417,7 @@ out:
|
||||
* Return: true always.
|
||||
*/
|
||||
|
||||
static bool mpi3mr_flush_scmd(struct request *rq,
|
||||
void *data, bool reserved)
|
||||
static bool mpi3mr_flush_scmd(struct request *rq, void *data)
|
||||
{
|
||||
struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
|
||||
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
|
||||
@ -451,7 +447,6 @@ out:
|
||||
* mpi3mr_count_dev_pending - Count commands pending for a lun
|
||||
* @rq: Block request
|
||||
* @data: SCSI device reference
|
||||
* @reserved: Unused
|
||||
*
|
||||
* This is an iterator function called for each SCSI command in
|
||||
* a host and if the command is pending in the LLD for the
|
||||
@ -461,8 +456,7 @@ out:
|
||||
* Return: true always.
|
||||
*/
|
||||
|
||||
static bool mpi3mr_count_dev_pending(struct request *rq,
|
||||
void *data, bool reserved)
|
||||
static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
|
||||
{
|
||||
struct scsi_device *sdev = (struct scsi_device *)data;
|
||||
struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
|
||||
@ -485,7 +479,6 @@ out:
|
||||
* mpi3mr_count_tgt_pending - Count commands pending for target
|
||||
* @rq: Block request
|
||||
* @data: SCSI target reference
|
||||
* @reserved: Unused
|
||||
*
|
||||
* This is an iterator function called for each SCSI command in
|
||||
* a host and if the command is pending in the LLD for the
|
||||
@ -495,8 +488,7 @@ out:
|
||||
* Return: true always.
|
||||
*/
|
||||
|
||||
static bool mpi3mr_count_tgt_pending(struct request *rq,
|
||||
void *data, bool reserved)
|
||||
static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
|
||||
{
|
||||
struct scsi_target *starget = (struct scsi_target *)data;
|
||||
struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
|
||||
|
@ -520,7 +520,7 @@ struct blk_mq_queue_data {
|
||||
bool last;
|
||||
};
|
||||
|
||||
typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
|
||||
typedef bool (busy_tag_iter_fn)(struct request *, void *);
|
||||
|
||||
/**
|
||||
* struct blk_mq_ops - Callback functions that implements block driver
|
||||
|
@ -786,7 +786,7 @@ extern int scsi_host_block(struct Scsi_Host *shost);
|
||||
extern int scsi_host_unblock(struct Scsi_Host *shost, int new_state);
|
||||
|
||||
void scsi_host_busy_iter(struct Scsi_Host *,
|
||||
bool (*fn)(struct scsi_cmnd *, void *, bool), void *priv);
|
||||
bool (*fn)(struct scsi_cmnd *, void *), void *priv);
|
||||
|
||||
struct class_container;
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user