Merge branch 'nvme-4.18' of git://git.infradead.org/nvme into for-linus
Pull NVMe fixes from Christoph:

 "Two small fixes each for the FC code and the target."

* 'nvme-4.18' of git://git.infradead.org/nvme:
  nvmet: only check for filebacking on -ENOTBLK
  nvmet: fixup crash on NULL device path
  nvme: if_ready checks to fail io to deleting controller
  nvmet-fc: fix target sgl list on large transfers
commit 78e18063a9
drivers/nvme/host/fabrics.c
@@ -539,14 +539,18 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
 /*
  * For something we're not in a state to send to the device the default action
  * is to busy it and retry it after the controller state is recovered. However,
- * anything marked for failfast or nvme multipath is immediately failed.
+ * if the controller is deleting or if anything is marked for failfast or
+ * nvme multipath it is immediately failed.
  *
  * Note: commands used to initialize the controller will be marked for failfast.
  * Note: nvme cli/ioctl commands are marked for failfast.
  */
-blk_status_t nvmf_fail_nonready_command(struct request *rq)
+blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
+		struct request *rq)
 {
-	if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+	if (ctrl->state != NVME_CTRL_DELETING &&
+	    ctrl->state != NVME_CTRL_DEAD &&
+	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
 		return BLK_STS_RESOURCE;
 	nvme_req(rq)->status = NVME_SC_ABORT_REQ;
 	return BLK_STS_IOERR;
drivers/nvme/host/fabrics.h
@@ -162,7 +162,8 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
 void nvmf_free_options(struct nvmf_ctrl_options *opts);
 int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
-blk_status_t nvmf_fail_nonready_command(struct request *rq);
+blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
+		struct request *rq);
 bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
 		bool queue_live);
 
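In plain terms, nvmf_fail_nonready_command() now takes the controller so it can stop "busying" I/O aimed at a controller that is being deleted or is already dead; such requests fail immediately instead of being retried forever. The snippet below is a rough, self-contained userspace model of that decision, not kernel code: the enum values and the two booleans are simplified stand-ins for nvme_ctrl_state, REQ_FAILFAST_* and REQ_NVME_MPATH.

/*
 * Illustrative sketch only - models the busy-vs-fail decision made by
 * nvmf_fail_nonready_command() after this change.
 */
#include <stdbool.h>
#include <stdio.h>

enum ctrl_state { CTRL_LIVE, CTRL_RESETTING, CTRL_DELETING, CTRL_DEAD };
enum verdict { BUSY_RETRY_LATER = 0, FAIL_IMMEDIATELY = 1 };

static enum verdict fail_nonready(enum ctrl_state state, bool failfast, bool mpath)
{
        /* Retry later only if the controller can still come back and the
         * request is neither failfast nor a multipath request. */
        if (state != CTRL_DELETING && state != CTRL_DEAD && !failfast && !mpath)
                return BUSY_RETRY_LATER;
        return FAIL_IMMEDIATELY;
}

int main(void)
{
        printf("resetting, normal I/O : %d\n", fail_nonready(CTRL_RESETTING, false, false));
        printf("deleting, normal I/O  : %d\n", fail_nonready(CTRL_DELETING, false, false));
        printf("resetting, failfast   : %d\n", fail_nonready(CTRL_RESETTING, true, false));
        return 0;
}

Compiled and run, it prints 0 (busy, retry later) for normal I/O on a resetting controller and 1 (fail immediately) for the deleting and failfast cases, which is the behavior change the FC, RDMA and loop callers below now get by passing the controller in.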
drivers/nvme/host/fc.c
@@ -2272,7 +2272,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
 	    !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
-		return nvmf_fail_nonready_command(rq);
+		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
 	ret = nvme_setup_cmd(ns, rq, sqe);
 	if (ret)
drivers/nvme/host/rdma.c
@@ -1639,7 +1639,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	WARN_ON_ONCE(rq->tag < 0);
 
 	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
-		return nvmf_fail_nonready_command(rq);
+		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
 	dev = queue->device->dev;
 	ib_dma_sync_single_for_cpu(dev, sqe->dma,
drivers/nvme/target/configfs.c
@@ -282,6 +282,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
 {
 	struct nvmet_ns *ns = to_nvmet_ns(item);
 	struct nvmet_subsys *subsys = ns->subsys;
+	size_t len;
 	int ret;
 
 	mutex_lock(&subsys->lock);
@@ -289,10 +290,14 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
 	if (ns->enabled)
 		goto out_unlock;
 
-	kfree(ns->device_path);
+	ret = -EINVAL;
+	len = strcspn(page, "\n");
+	if (!len)
+		goto out_unlock;
 
+	kfree(ns->device_path);
 	ret = -ENOMEM;
-	ns->device_path = kstrndup(page, strcspn(page, "\n"), GFP_KERNEL);
+	ns->device_path = kstrndup(page, len, GFP_KERNEL);
 	if (!ns->device_path)
 		goto out_unlock;
 
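The configfs change validates the written path before touching the old one: the input is trimmed at the first newline, an empty result is rejected with -EINVAL, and only then is the previous ns->device_path freed and replaced, so writing an empty or newline-only string can no longer leave the namespace without a usable path. Here is a minimal userspace sketch of the same flow (illustrative only; libc strndup stands in for kstrndup, and the ns->enabled check and subsys->lock handling are omitted):

/* Illustrative sketch of the device_path store flow after this change. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *device_path;	/* stands in for ns->device_path */

static int device_path_store(const char *page)
{
        size_t len = strcspn(page, "\n");	/* length up to the first newline */

        if (!len)				/* empty or newline-only input */
                return -EINVAL;

        free(device_path);			/* only now drop the old path */
        device_path = strndup(page, len);
        if (!device_path)
                return -ENOMEM;
        return 0;
}

int main(void)
{
        printf("writing \"\\n\"             -> %d\n", device_path_store("\n"));
        printf("writing \"/dev/nvme0n1\\n\" -> %d\n", device_path_store("/dev/nvme0n1\n"));
        printf("path is now: %s\n", device_path ? device_path : "(null)");
        free(device_path);
        return 0;
}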
drivers/nvme/target/core.c
@@ -339,7 +339,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 		goto out_unlock;
 
 	ret = nvmet_bdev_ns_enable(ns);
-	if (ret)
+	if (ret == -ENOTBLK)
 		ret = nvmet_file_ns_enable(ns);
 	if (ret)
 		goto out_unlock;
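With the -ENOTBLK check, the target only falls back to the file backend when the block-device enable reports that the configured path is not a block device; any other error (missing path, permissions, and so on) now propagates instead of being masked by a second attempt. A tiny sketch of the pattern, with hypothetical helper names and userspace errno conventions:

/* Illustrative sketch of the backend-selection rule after this change. */
#include <errno.h>
#include <stdio.h>

static int bdev_ns_enable(const char *path) { (void)path; return -ENOTBLK; } /* pretend: regular file */
static int file_ns_enable(const char *path) { (void)path; return 0; }        /* pretend: file backend ok */

static int ns_enable(const char *path)
{
        int ret = bdev_ns_enable(path);

        if (ret == -ENOTBLK)		/* only then try the file backend */
                ret = file_ns_enable(path);
        return ret;
}

int main(void)
{
        printf("enable -> %d\n", ns_enable("/var/lib/nvmet/ns1.img"));
        return 0;
}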
drivers/nvme/target/fc.c
@@ -58,8 +58,8 @@ struct nvmet_fc_ls_iod {
 	struct work_struct	work;
 } __aligned(sizeof(unsigned long long));
 
+/* desired maximum for a single sequence - if sg list allows it */
 #define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)
-#define NVMET_FC_MAX_XFR_SGENTS		(NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
 
 enum nvmet_fcp_datadir {
 	NVMET_FCP_NODATA,
@@ -74,6 +74,7 @@ struct nvmet_fc_fcp_iod {
 	struct nvme_fc_cmd_iu	cmdiubuf;
 	struct nvme_fc_ersp_iu	rspiubuf;
 	dma_addr_t		rspdma;
+	struct scatterlist	*next_sg;
 	struct scatterlist	*data_sg;
 	int			data_sg_cnt;
 	u32			offset;
@@ -1025,8 +1026,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
 	INIT_LIST_HEAD(&newrec->assoc_list);
 	kref_init(&newrec->ref);
 	ida_init(&newrec->assoc_cnt);
-	newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
-					template->max_sgl_segments);
+	newrec->max_sg_cnt = template->max_sgl_segments;
 
 	ret = nvmet_fc_alloc_ls_iodlist(newrec);
 	if (ret) {
@@ -1722,6 +1722,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
 				((fod->io_dir == NVMET_FCP_WRITE) ?
 					DMA_FROM_DEVICE : DMA_TO_DEVICE));
 				/* note: write from initiator perspective */
+	fod->next_sg = fod->data_sg;
 
 	return 0;
 
@@ -1866,24 +1867,49 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
 				struct nvmet_fc_fcp_iod *fod, u8 op)
 {
 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+	struct scatterlist *sg = fod->next_sg;
 	unsigned long flags;
-	u32 tlen;
+	u32 remaininglen = fod->req.transfer_len - fod->offset;
+	u32 tlen = 0;
 	int ret;
 
 	fcpreq->op = op;
 	fcpreq->offset = fod->offset;
 	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
 
-	tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
-			(fod->req.transfer_len - fod->offset));
+	/*
+	 * for next sequence:
+	 *  break at a sg element boundary
+	 *  attempt to keep sequence length capped at
+	 *    NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
+	 *    be longer if a single sg element is larger
+	 *    than that amount. This is done to avoid creating
+	 *    a new sg list to use for the tgtport api.
+	 */
+	fcpreq->sg = sg;
+	fcpreq->sg_cnt = 0;
+	while (tlen < remaininglen &&
+	       fcpreq->sg_cnt < tgtport->max_sg_cnt &&
+	       tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
+		fcpreq->sg_cnt++;
+		tlen += sg_dma_len(sg);
+		sg = sg_next(sg);
+	}
+	if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
+		fcpreq->sg_cnt++;
+		tlen += min_t(u32, sg_dma_len(sg), remaininglen);
+		sg = sg_next(sg);
+	}
+	if (tlen < remaininglen)
+		fod->next_sg = sg;
+	else
+		fod->next_sg = NULL;
+
 	fcpreq->transfer_length = tlen;
 	fcpreq->transferred_length = 0;
 	fcpreq->fcp_error = 0;
 	fcpreq->rsplen = 0;
 
-	fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
-	fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
-
 	/*
 	 * If the last READDATA request: check if LLDD supports
 	 * combined xfr with response.
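The sgl fix stops assuming that every scatterlist element is exactly PAGE_SIZE long. Each FC sequence now starts at fod->next_sg, breaks on element boundaries, and is kept near NVMET_FC_MAX_SEQ_LENGTH unless a single element is larger than that, in which case the whole element is sent rather than splitting it. Below is a small, self-contained userspace model of that capping rule; an array of lengths stands in for the DMA-mapped sg list, and the constants and limits are illustrative, not the driver's actual values.

/* Illustrative model of the per-sequence sg walk added in this patch. */
#include <stdio.h>

#define MAX_SEQ_LENGTH	(256 * 1024)	/* mirrors NVMET_FC_MAX_SEQ_LENGTH */
#define MAX_SG_CNT	4		/* pretend LLDD max_sgl_segments */

struct seq { unsigned int cnt, tlen; };

/* Build one sequence starting at element 'start', with 'remaining' bytes
 * of the transfer still outstanding. */
static struct seq next_seq(const unsigned int *len, unsigned int nelem,
                           unsigned int start, unsigned int remaining)
{
        struct seq s = { 0, 0 };
        unsigned int i = start;

        /* break at element boundaries, cap near MAX_SEQ_LENGTH ... */
        while (i < nelem && s.tlen < remaining && s.cnt < MAX_SG_CNT &&
               s.tlen + len[i] < MAX_SEQ_LENGTH) {
                s.cnt++;
                s.tlen += len[i++];
        }
        /* ... but send one oversized element whole rather than splitting it */
        if (i < nelem && s.tlen < remaining && s.cnt == 0) {
                s.cnt++;
                s.tlen += (len[i] < remaining) ? len[i] : remaining;
                i++;
        }
        return s;
}

int main(void)
{
        /* one huge element followed by two page-sized ones */
        unsigned int len[] = { 512 * 1024, 4096, 4096 };
        unsigned int remaining = 512 * 1024 + 8192, start = 0;

        while (remaining) {
                struct seq s = next_seq(len, 3, start, remaining);
                if (!s.cnt)
                        break;
                printf("seq: %u element(s), %u bytes\n", s.cnt, s.tlen);
                remaining -= s.tlen;
                start += s.cnt;
        }
        return 0;
}

With the sample lengths it emits one 512 KiB sequence for the oversized element and a second 8 KiB sequence for the two page-sized ones, which is the boundary behavior the old PAGE_SIZE-based indexing got wrong on large transfers.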
drivers/nvme/target/loop.c
@@ -162,7 +162,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_status_t ret;
 
 	if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
-		return nvmf_fail_nonready_command(req);
+		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
 
 	ret = nvme_setup_cmd(ns, req, &iod->cmd);
 	if (ret)