nvme: untangle 0 and BLK_MQ_RQ_QUEUE_OK
Let's not depend on any of the BLK_MQ_RQ_QUEUE_* constants having
specific values. No functional change.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit bac0000af5
parent b4a567e811
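For context on the idiom the diff below converges on: blk-mq drivers report
queueing status through the BLK_MQ_RQ_QUEUE_* constants, and
BLK_MQ_RQ_QUEUE_OK merely happens to be 0 in this era's
include/linux/blk-mq.h. The standalone sketch below is a toy illustration,
not kernel code: the enum values are assumed from that header and the helper
names are made up. It shows the before/after pattern the patch applies:
return the named constant instead of a bare 0, and compare against it
instead of treating any nonzero value as failure.

/*
 * Toy sketch of the pattern this patch enforces. The enum mirrors what
 * include/linux/blk-mq.h is assumed to define at this point in time; the
 * setup/queue helpers are hypothetical stand-ins, not the NVMe driver's.
 */
#include <stdio.h>

enum {
	BLK_MQ_RQ_QUEUE_OK	= 0,	/* request was queued fine */
	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue the request later */
	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* fail the request */
};

/* Before: success leaks out as a bare 0 and callers test "if (ret)". */
static int toy_setup_cmd_old(int fail)
{
	return fail ? BLK_MQ_RQ_QUEUE_ERROR : 0;
}

static int toy_queue_rq_old(int fail)
{
	int ret = toy_setup_cmd_old(fail);

	if (ret)			/* only works while _OK == 0 */
		return ret;
	return 0;
}

/* After: the named constant is returned and compared explicitly, so the
 * logic stays correct even if the constants were ever renumbered. */
static int toy_setup_cmd_new(int fail)
{
	return fail ? BLK_MQ_RQ_QUEUE_ERROR : BLK_MQ_RQ_QUEUE_OK;
}

static int toy_queue_rq_new(int fail)
{
	int ret = toy_setup_cmd_new(fail);

	if (ret != BLK_MQ_RQ_QUEUE_OK)
		return ret;		/* propagate BUSY/ERROR unchanged */
	return BLK_MQ_RQ_QUEUE_OK;
}

int main(void)
{
	printf("old: ok=%d fail=%d\n", toy_queue_rq_old(0), toy_queue_rq_old(1));
	printf("new: ok=%d fail=%d\n", toy_queue_rq_new(0), toy_queue_rq_new(1));
	return 0;
}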
@@ -269,7 +269,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 	 */
 	req->__data_len = nr_bytes;
 
-	return 0;
+	return BLK_MQ_RQ_QUEUE_OK;
 }
 
 static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
@@ -317,7 +317,7 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
 int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmd)
 {
-	int ret = 0;
+	int ret = BLK_MQ_RQ_QUEUE_OK;
 
 	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
 		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
@@ -328,7 +328,7 @@ static int nvme_init_iod(struct request *rq, unsigned size,
 		rq->retries = 0;
 		rq->rq_flags |= RQF_DONTPREP;
 	}
-	return 0;
+	return BLK_MQ_RQ_QUEUE_OK;
 }
 
 static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
@@ -598,17 +598,17 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	map_len = nvme_map_len(req);
 	ret = nvme_init_iod(req, map_len, dev);
-	if (ret)
+	if (ret != BLK_MQ_RQ_QUEUE_OK)
 		return ret;
 
 	ret = nvme_setup_cmd(ns, req, &cmnd);
-	if (ret)
+	if (ret != BLK_MQ_RQ_QUEUE_OK)
 		goto out;
 
 	if (req->nr_phys_segments)
 		ret = nvme_map_data(dev, req, map_len, &cmnd);
 
-	if (ret)
+	if (ret != BLK_MQ_RQ_QUEUE_OK)
 		goto out;
 
 	cmnd.common.command_id = req->tag;
@@ -1395,7 +1395,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 			sizeof(struct nvme_command), DMA_TO_DEVICE);
 
 	ret = nvme_setup_cmd(ns, rq, c);
-	if (ret)
+	if (ret != BLK_MQ_RQ_QUEUE_OK)
 		return ret;
 
 	c->common.command_id = rq->tag;
@@ -169,7 +169,7 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	int ret;
 
 	ret = nvme_setup_cmd(ns, req, &iod->cmd);
-	if (ret)
+	if (ret != BLK_MQ_RQ_QUEUE_OK)
 		return ret;
 
 	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
@@ -179,7 +179,7 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		nvme_cleanup_cmd(req);
 		blk_mq_start_request(req);
 		nvme_loop_queue_response(&iod->req);
-		return 0;
+		return BLK_MQ_RQ_QUEUE_OK;
 	}
 
 	if (blk_rq_bytes(req)) {
@@ -198,7 +198,7 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_mq_start_request(req);
 
 	schedule_work(&iod->work);
-	return 0;
+	return BLK_MQ_RQ_QUEUE_OK;
 }
 
 static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)