for-5.19/block-exec-2022-06-02
Merge tag 'for-5.19/block-exec-2022-06-02' of git://git.kernel.dk/linux-block

Pull block request execute cleanups from Jens Axboe:
 "This change was advertised in the initial core block pull request,
  but didn't actually make that branch as we deferred it to a
  post-merge pull request to avoid a bunch of cross branch issues.

  This series cleans up the block execute path quite nicely"

* tag 'for-5.19/block-exec-2022-06-02' of git://git.kernel.dk/linux-block:
  blk-mq: remove the done argument to blk_execute_rq_nowait
  blk-mq: avoid a mess of casts for blk_end_sync_rq
  blk-mq: remove __blk_execute_rq_nowait
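The visible effect on callers: the completion callback moves from a function argument onto the request itself. A minimal before/after sketch (my_endio and my_driver_data are illustrative placeholder names, not taken from this diff):

	/* before this series: callback passed as the third argument */
	rq->end_io_data = my_driver_data;
	blk_execute_rq_nowait(rq, false, my_endio);

	/* after this series: callback assigned on the request first */
	rq->end_io = my_endio;
	rq->end_io_data = my_driver_data;
	blk_execute_rq_nowait(rq, false);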
This commit is contained in:
commit 72fbbc3d0e

block/blk-mq.c (109 changed lines)
@@ -1152,24 +1152,6 @@ void blk_mq_start_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_start_request);
 
-/**
- * blk_end_sync_rq - executes a completion event on a request
- * @rq: request to complete
- * @error: end I/O status of the request
- */
-static void blk_end_sync_rq(struct request *rq, blk_status_t error)
-{
-	struct completion *waiting = rq->end_io_data;
-
-	rq->end_io_data = (void *)(uintptr_t)error;
-
-	/*
-	 * complete last, if this is a stack request the process (and thus
-	 * the rq pointer) could be invalid right after this complete()
-	 */
-	complete(waiting);
-}
-
 /*
  * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
  * queues. This is important for md arrays to benefit from merging
@@ -1204,33 +1186,10 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 	plug->rq_count++;
 }
 
-static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
-		rq_end_io_fn *done, bool use_plug)
-{
-	WARN_ON(irqs_disabled());
-	WARN_ON(!blk_rq_is_passthrough(rq));
-
-	rq->end_io = done;
-
-	blk_account_io_start(rq);
-
-	if (use_plug && current->plug) {
-		blk_add_rq_to_plug(current->plug, rq);
-		return;
-	}
-	/*
-	 * don't check dying flag for MQ because the request won't
-	 * be reused after dying flag is set
-	 */
-	blk_mq_sched_insert_request(rq, at_head, true, false);
-}
-
-
 /**
  * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
  * @rq: request to insert
  * @at_head: insert request at head or tail of queue
- * @done: I/O completion handler
  *
  * Description:
  *    Insert a fully prepared request at the back of the I/O scheduler queue
@@ -1239,13 +1198,32 @@ static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
  * Note:
  *    This function will invoke @done directly if the queue is dead.
  */
-void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
+void blk_execute_rq_nowait(struct request *rq, bool at_head)
 {
-	__blk_execute_rq_nowait(rq, at_head, done, true);
+	WARN_ON(irqs_disabled());
+	WARN_ON(!blk_rq_is_passthrough(rq));
+
+	blk_account_io_start(rq);
+	if (current->plug)
+		blk_add_rq_to_plug(current->plug, rq);
+	else
+		blk_mq_sched_insert_request(rq, at_head, true, false);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
+struct blk_rq_wait {
+	struct completion done;
+	blk_status_t ret;
+};
+
+static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
+{
+	struct blk_rq_wait *wait = rq->end_io_data;
+
+	wait->ret = ret;
+	complete(&wait->done);
+}
+
 static bool blk_rq_is_poll(struct request *rq)
 {
 	if (!rq->mq_hctx)
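With __blk_execute_rq_nowait() folded away, blk_execute_rq_nowait() now always adds the request to the current plug when one is active; the no-plug special case for polled requests moves into blk_execute_rq() below. A minimal async submission sketch under the new convention (the queue pointer q, my_endio and my_cookie are illustrative assumptions, not taken from this diff):

	struct request *rq;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->end_io = my_endio;		/* completion callback, set by the caller */
	rq->end_io_data = my_cookie;	/* private data the callback reads back */
	blk_execute_rq_nowait(rq, false);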
@@ -1277,30 +1255,37 @@ static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
  */
 blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
-	unsigned long hang_check;
+	struct blk_rq_wait wait = {
+		.done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
+	};
 
-	/*
-	 * iopoll requires request to be submitted to driver, so can't
-	 * use plug
-	 */
+	WARN_ON(irqs_disabled());
+	WARN_ON(!blk_rq_is_passthrough(rq));
+
 	rq->end_io_data = &wait;
-	__blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq,
-			!blk_rq_is_poll(rq));
+	rq->end_io = blk_end_sync_rq;
 
-	/* Prevent hang_check timer from firing at us during very long I/O */
-	hang_check = sysctl_hung_task_timeout_secs;
+	blk_account_io_start(rq);
+	blk_mq_sched_insert_request(rq, at_head, true, false);
 
-	if (blk_rq_is_poll(rq))
-		blk_rq_poll_completion(rq, &wait);
-	else if (hang_check)
-		while (!wait_for_completion_io_timeout(&wait,
-				hang_check * (HZ/2)))
-			;
-	else
-		wait_for_completion_io(&wait);
+	if (blk_rq_is_poll(rq)) {
+		blk_rq_poll_completion(rq, &wait.done);
+	} else {
+		/*
+		 * Prevent hang_check timer from firing at us during very long
+		 * I/O
+		 */
+		unsigned long hang_check = sysctl_hung_task_timeout_secs;
+
+		if (hang_check)
+			while (!wait_for_completion_io_timeout(&wait.done,
+					hang_check * (HZ/2)))
+				;
+		else
+			wait_for_completion_io(&wait.done);
+	}
 
-	return (blk_status_t)(uintptr_t)rq->end_io_data;
+	return wait.ret;
 }
 EXPORT_SYMBOL(blk_execute_rq);
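The synchronous path now returns the status through the on-stack struct blk_rq_wait rather than casting it in and out of rq->end_io_data. From the caller's side nothing changes; a hedged sketch (request setup elided, rq is an already-prepared passthrough request):

	blk_status_t status;

	status = blk_execute_rq(rq, false);
	if (status != BLK_STS_OK)
		return blk_status_to_errno(status);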
@@ -540,7 +540,7 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
 	spin_unlock_irq(&host->lock);
 
 	DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
-	blk_execute_rq_nowait(rq, true, NULL);
+	blk_execute_rq_nowait(rq, true);
 
 	return 0;
 
@@ -579,7 +579,7 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
 	crq->msg_bucket = (u32) rc;
 
 	DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
-	blk_execute_rq_nowait(rq, true, NULL);
+	blk_execute_rq_nowait(rq, true);
 
 	return 0;
 }
@@ -1206,9 +1206,10 @@ static void nvme_keep_alive_work(struct work_struct *work)
 	nvme_init_request(rq, &ctrl->ka_cmd);
 
 	rq->timeout = ctrl->kato * HZ;
+	rq->end_io = nvme_keep_alive_end_io;
 	rq->end_io_data = ctrl;
 	rq->rq_flags |= RQF_QUIET;
-	blk_execute_rq_nowait(rq, false, nvme_keep_alive_end_io);
+	blk_execute_rq_nowait(rq, false);
 }
 
 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
@@ -453,6 +453,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 			blk_flags);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
+	req->end_io = nvme_uring_cmd_end_io;
 	req->end_io_data = ioucmd;
 
 	/* to free bio on completion, as req->bio will be null at that time */
@@ -461,7 +462,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	pdu->meta_buffer = nvme_to_user_ptr(d.metadata);
 	pdu->meta_len = d.metadata_len;
 
-	blk_execute_rq_nowait(req, 0, nvme_uring_cmd_end_io);
+	blk_execute_rq_nowait(req, false);
 	return -EIOCBQUEUED;
 }
@@ -1438,9 +1438,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	}
 	nvme_init_request(abort_req, &cmd);
 
+	abort_req->end_io = abort_endio;
 	abort_req->end_io_data = NULL;
 	abort_req->rq_flags |= RQF_QUIET;
-	blk_execute_rq_nowait(abort_req, false, abort_endio);
+	blk_execute_rq_nowait(abort_req, false);
 
 	/*
 	 * The aborted req will be completed on receiving the abort req.
@@ -2485,12 +2486,15 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 		return PTR_ERR(req);
 	nvme_init_request(req, &cmd);
 
+	if (opcode == nvme_admin_delete_cq)
+		req->end_io = nvme_del_cq_end;
+	else
+		req->end_io = nvme_del_queue_end;
 	req->end_io_data = nvmeq;
 
 	init_completion(&nvmeq->delete_done);
 	req->rq_flags |= RQF_QUIET;
-	blk_execute_rq_nowait(req, false, opcode == nvme_admin_delete_cq ?
-			nvme_del_cq_end : nvme_del_queue_end);
+	blk_execute_rq_nowait(req, false);
 	return 0;
 }
@@ -285,8 +285,9 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
 		req->p.rq = rq;
 		queue_work(nvmet_wq, &req->p.work);
 	} else {
+		rq->end_io = nvmet_passthru_req_done;
 		rq->end_io_data = req;
-		blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
+		blk_execute_rq_nowait(rq, false);
 	}
 
 	if (ns)
@@ -2039,12 +2039,13 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
 	scmd->cmnd[4] = SCSI_REMOVAL_PREVENT;
 	scmd->cmnd[5] = 0;
 	scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
+	scmd->allowed = 5;
 
 	req->rq_flags |= RQF_QUIET;
 	req->timeout = 10 * HZ;
-	scmd->allowed = 5;
+	req->end_io = eh_lock_door_done;
 
-	blk_execute_rq_nowait(req, true, eh_lock_door_done);
+	blk_execute_rq_nowait(req, true);
 }
 
 /**
@@ -831,7 +831,8 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
 
 	srp->rq->timeout = timeout;
 	kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
-	blk_execute_rq_nowait(srp->rq, at_head, sg_rq_end_io);
+	srp->rq->end_io = sg_rq_end_io;
+	blk_execute_rq_nowait(srp->rq, at_head);
 	return 0;
 }
@@ -579,9 +579,10 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
 	memcpy(scmd->cmnd, cmd, scmd->cmd_len);
 	req->timeout = timeout;
 	scmd->allowed = retries;
+	req->end_io = st_scsi_execute_end;
 	req->end_io_data = SRpnt;
 
-	blk_execute_rq_nowait(req, true, st_scsi_execute_end);
+	blk_execute_rq_nowait(req, true);
 	return 0;
 }
@@ -671,11 +671,12 @@ static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
 
 	req->timeout = 0;
 	req->end_io_data = umap_req;
+	req->end_io = ufshpb_umap_req_compl_fn;
 
 	ufshpb_set_unmap_cmd(scmd->cmnd, rgn);
 	scmd->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
 
-	blk_execute_rq_nowait(req, true, ufshpb_umap_req_compl_fn);
+	blk_execute_rq_nowait(req, true);
 
 	hpb->stats.umap_req_cnt++;
 }
@@ -707,6 +708,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
 	blk_rq_append_bio(req, map_req->bio);
 
 	req->end_io_data = map_req;
+	req->end_io = ufshpb_map_req_compl_fn;
 
 	if (unlikely(last))
 		mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
@@ -716,7 +718,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
 			map_req->rb.srgn_idx, mem_size);
 	scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
 
-	blk_execute_rq_nowait(req, true, ufshpb_map_req_compl_fn);
+	blk_execute_rq_nowait(req, true);
 
 	hpb->stats.map_req_cnt++;
 	return 0;
@@ -972,8 +972,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
 
 	cmd->priv = scmd->cmnd;
 
-	blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG,
-			pscsi_req_done);
+	blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG);
 
 	return 0;
@@ -969,8 +969,7 @@ int blk_rq_unmap_user(struct bio *);
 int blk_rq_map_kern(struct request_queue *, struct request *, void *,
 		unsigned int, gfp_t);
 int blk_rq_append_bio(struct request *rq, struct bio *bio);
-void blk_execute_rq_nowait(struct request *rq, bool at_head,
-		rq_end_io_fn *end_io);
+void blk_execute_rq_nowait(struct request *rq, bool at_head);
 blk_status_t blk_execute_rq(struct request *rq, bool at_head);
 
 struct req_iterator {
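The rq_end_io_fn callback type itself is untouched by the series; only how the callback reaches the block layer changes. A sketch of a handler wired through the request, assuming a hypothetical my_ctx structure holding a completion and a status field (not part of this diff):

	static void my_endio(struct request *rq, blk_status_t err)
	{
		struct my_ctx *ctx = rq->end_io_data;	/* hypothetical per-request data */

		ctx->status = err;
		complete(&ctx->done);
		blk_mq_free_request(rq);	/* same pattern as nvme_del_queue_end above */
	}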