nvme: use blk_mq_complete_request_remote to avoid an indirect function call
Use the new blk_mq_complete_request_remote helper to avoid an indirect
function call in the completion fast path.

Reviewed-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit ff02945149
parent 8446546cc2
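The core of the change is nvme_end_request(): it now returns a bool and calls
blk_mq_complete_request_remote(), which only hands the completion off (to a
remote CPU, or to the fake-timeout path) when it has to, and returns false when
the caller should finish the request itself. Each transport caller (FC, PCIe,
RDMA, TCP, and the loop target) then invokes its own completion routine
directly, so the common case no longer goes through the indirect ->complete()
callback taken by blk_mq_complete_request(). Below is a minimal caller-side
sketch of the resulting pattern, not part of the patch itself;
nvme_foo_handle_completion() and nvme_foo_complete_rq() are hypothetical
stand-ins for the per-transport handlers (nvme_pci_complete_rq(),
nvme_rdma_complete_rq(), and so on) that appear in the hunks that follow:

static void nvme_foo_handle_completion(struct request *req, __le16 status,
                                       union nvme_result result)
{
        /*
         * nvme_end_request() returns true when the completion has been
         * taken over (faked timeout, or queued for a remote CPU); false
         * means the request must be completed right here, in the caller.
         */
        if (!nvme_end_request(req, status, result))
                nvme_foo_complete_rq(req);      /* direct call, no indirect ->complete() */
}

The bool return also keeps the fake-timeout case correct: when the timeout is
being faked, nvme_end_request() claims the request and the caller must not
touch it again.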
@@ -227,6 +227,7 @@ static DECLARE_COMPLETION(nvme_fc_unload_proceed);
  */
 static struct device *fc_udev_device;
 
+static void nvme_fc_complete_rq(struct request *rq);
 
 /* *********************** FC-NVME Port Management ************************ */
 
@@ -2033,7 +2034,8 @@ done:
         }
 
         __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
-        nvme_end_request(rq, status, result);
+        if (!nvme_end_request(rq, status, result))
+                nvme_fc_complete_rq(rq);
 
 check_error:
         if (terminate_assoc)
@@ -472,7 +472,7 @@ static inline u32 nvme_bytes_to_numd(size_t len)
         return (len >> 2) - 1;
 }
 
-static inline void nvme_end_request(struct request *req, __le16 status,
+static inline bool nvme_end_request(struct request *req, __le16 status,
                 union nvme_result result)
 {
         struct nvme_request *rq = nvme_req(req);
@@ -481,8 +481,9 @@ static inline void nvme_end_request(struct request *req, __le16 status,
         rq->result = result;
         /* inject error when permitted by fault injection framework */
         nvme_should_fail(req);
-        if (likely(!blk_should_fake_timeout(req->q)))
-                blk_mq_complete_request(req);
+        if (unlikely(blk_should_fake_timeout(req->q)))
+                return true;
+        return blk_mq_complete_request_remote(req);
 }
 
 static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
@@ -963,7 +963,8 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 
         req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
         trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
-        nvme_end_request(req, cqe->status, cqe->result);
+        if (!nvme_end_request(req, cqe->status, cqe->result))
+                nvme_pci_complete_rq(req);
 }
 
 static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
@@ -149,6 +149,7 @@ MODULE_PARM_DESC(register_always,
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
                 struct rdma_cm_event *event);
 static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void nvme_rdma_complete_rq(struct request *rq);
 
 static const struct blk_mq_ops nvme_rdma_mq_ops;
 static const struct blk_mq_ops nvme_rdma_admin_mq_ops;
@@ -1155,7 +1156,8 @@ static void nvme_rdma_end_request(struct nvme_rdma_request *req)
 
         if (!refcount_dec_and_test(&req->ref))
                 return;
-        nvme_end_request(rq, req->status, req->result);
+        if (!nvme_end_request(rq, req->status, req->result))
+                nvme_rdma_complete_rq(rq);
 }
 
 static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
@@ -464,7 +464,8 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
                 return -EINVAL;
         }
 
-        nvme_end_request(rq, cqe->status, cqe->result);
+        if (!nvme_end_request(rq, cqe->status, cqe->result))
+                nvme_complete_rq(rq);
         queue->nr_cqe++;
 
         return 0;
@@ -654,7 +655,8 @@ static inline void nvme_tcp_end_request(struct request *rq, u16 status)
 {
         union nvme_result res = {};
 
-        nvme_end_request(rq, cpu_to_le16(status << 1), res);
+        if (!nvme_end_request(rq, cpu_to_le16(status << 1), res))
+                nvme_complete_rq(rq);
 }
 
 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
@@ -116,7 +116,8 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
                         return;
                 }
 
-                nvme_end_request(rq, cqe->status, cqe->result);
+                if (!nvme_end_request(rq, cqe->status, cqe->result))
+                        nvme_loop_complete_rq(rq);
         }
 }
 