Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "A set of fixes that should go into this series. This contains:

   - Fix from Bart for blk-mq requeue queue running, preventing a
     continued loop of run/restart.

   - Fix for a bio/blk-integrity issue, in two parts. One from
     Christoph, fixing where verification happens, and one from Milan,
     for a NULL profile.

   - NVMe pull request, most of the changes being for nvme-fc, but also
     a few trivial core/pci fixes"

* 'for-linus' of git://git.kernel.dk/linux-block:
  nvme: fix directive command numd calculation
  nvme: fix nvme reset command timeout handling
  nvme-pci: fix CMB sysfs file removal in reset path
  lpfc: support nvmet_fc defer_rcv callback
  nvmet_fc: add defer_req callback for deferment of cmd buffer return
  nvme: strip trailing 0-bytes in wwid_show
  block: Make blk_mq_delay_kick_requeue_list() rerun the queue at a quiet time
  bio-integrity: only verify integrity on the lowest stacked driver
  bio-integrity: Fix regression if profile verify_fn is NULL
commit e0d0e045b8
block/bio-integrity.c
@@ -387,9 +387,11 @@ static void bio_integrity_verify_fn(struct work_struct *work)
  */
 bool __bio_integrity_endio(struct bio *bio)
 {
-        if (bio_op(bio) == REQ_OP_READ && !bio->bi_status) {
-                struct bio_integrity_payload *bip = bio_integrity(bio);
+        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
+        struct bio_integrity_payload *bip = bio_integrity(bio);
 
+        if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
+            (bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->profile->verify_fn) {
                 INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
                 queue_work(kintegrityd_wq, &bip->bip_work);
                 return false;
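The fix above queues verification work only when the payload is block-layer owned and the integrity profile actually provides a verify callback; some profiles (e.g. a nop profile) leave verify_fn NULL. A minimal userspace sketch of that guard, with simplified stand-in types rather than the kernel structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define BIP_BLOCK_INTEGRITY 0x1 /* stand-in for the kernel flag */

/* Simplified stand-ins for the kernel structures involved. */
struct blk_integrity_profile {
        int (*verify_fn)(const void *buf, size_t len);
};

struct bio_integrity_payload {
        unsigned int bip_flags;
};

/*
 * Verification work may only be queued when the payload is block-layer
 * owned AND the profile actually provides a verify callback; dereferencing
 * a NULL verify_fn later was the regression being fixed.
 */
static bool should_verify(const struct bio_integrity_payload *bip,
                          const struct blk_integrity_profile *profile)
{
        return (bip->bip_flags & BIP_BLOCK_INTEGRITY) && profile->verify_fn;
}

int main(void)
{
        struct blk_integrity_profile nop = { .verify_fn = NULL };
        struct bio_integrity_payload bip = { .bip_flags = BIP_BLOCK_INTEGRITY };

        printf("queue verification: %s\n",
               should_verify(&bip, &nop) ? "yes" : "no");
        return 0;
}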
block/blk-mq.c
@@ -684,8 +684,8 @@ EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
                                     unsigned long msecs)
 {
-        kblockd_schedule_delayed_work(&q->requeue_work,
-                                      msecs_to_jiffies(msecs));
+        kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
+                                    msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
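The switch matters because schedule-style arming is a no-op when the work item is already pending, so an earlier immediate kick could pin the delayed kick to "now" and sustain the run/restart loop, while mod-style arming overwrites the pending timer with the requested delay. A toy userspace model of the two arming behaviors (illustrative only, not the workqueue API):

#include <stdbool.h>
#include <stdio.h>

/* Toy model of a delayed work item: a pending flag and a deadline. */
struct delayed_work_model {
        bool pending;
        long deadline;
};

/* schedule_delayed_work() semantics: no-op if already pending. */
static void schedule_model(struct delayed_work_model *w, long now, long delay)
{
        if (w->pending)
                return;                 /* keeps the OLD deadline */
        w->pending = true;
        w->deadline = now + delay;
}

/* mod_delayed_work() semantics: always (re)arms to the new deadline. */
static void mod_model(struct delayed_work_model *w, long now, long delay)
{
        w->pending = true;
        w->deadline = now + delay;      /* overrides any earlier deadline */
}

int main(void)
{
        struct delayed_work_model w = { .pending = true, .deadline = 0 };

        schedule_model(&w, 0, 100);     /* already kicked for "now"... */
        printf("schedule: deadline=%ld\n", w.deadline);  /* stays 0   */

        mod_model(&w, 0, 100);          /* ...mod pushes the run out   */
        printf("mod:      deadline=%ld\n", w.deadline);  /* now 100   */
        return 0;
}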
drivers/nvme/host/core.c
@@ -336,7 +336,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
 
         c.directive.opcode = nvme_admin_directive_recv;
         c.directive.nsid = cpu_to_le32(nsid);
-        c.directive.numd = cpu_to_le32(sizeof(*s));
+        c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
         c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
         c.directive.dtype = NVME_DIR_STREAMS;
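NUMD ("number of dwords") fields in NVMe admin commands are zero-based dword counts, so a buffer of N bytes is (N / 4) - 1; passing sizeof(*s) directly asked the controller for roughly four times the intended transfer. A small sketch of the conversion (the struct is an illustrative stand-in, not the real nvme_streams_params layout):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for a 32-byte parameter structure. */
struct streams_params_example {
        uint32_t words[8];
};

/* NUMD is a 0's-based dword count: (bytes / 4) - 1. */
static uint32_t bytes_to_numd(size_t bytes)
{
        return (uint32_t)((bytes >> 2) - 1);
}

int main(void)
{
        size_t sz = sizeof(struct streams_params_example);

        printf("bytes=%zu buggy numd=%zu fixed numd=%u\n",
               sz, sz, bytes_to_numd(sz));
        /* bytes=32: the buggy value 32 requests 33 dwords (132 bytes);
         * the fixed value 7 requests 8 dwords (exactly 32 bytes). */
        return 0;
}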
@@ -1509,7 +1509,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
                 blk_queue_write_cache(q, vwc, vwc);
 }
 
-static void nvme_configure_apst(struct nvme_ctrl *ctrl)
+static int nvme_configure_apst(struct nvme_ctrl *ctrl)
 {
         /*
          * APST (Autonomous Power State Transition) lets us program a
@@ -1538,16 +1538,16 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
          * then don't do anything.
          */
         if (!ctrl->apsta)
-                return;
+                return 0;
 
         if (ctrl->npss > 31) {
                 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
-                return;
+                return 0;
         }
 
         table = kzalloc(sizeof(*table), GFP_KERNEL);
         if (!table)
-                return;
+                return 0;
 
         if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
                 /* Turn off APST. */
@@ -1629,6 +1629,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
                 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
 
         kfree(table);
+        return ret;
 }
 
 static void nvme_set_latency_tolerance(struct device *dev, s32 val)
@@ -1835,13 +1836,16 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
                  * In fabrics we need to verify the cntlid matches the
                  * admin connect
                  */
-                if (ctrl->cntlid != le16_to_cpu(id->cntlid))
+                if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
                         ret = -EINVAL;
+                        goto out_free;
+                }
 
                 if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
                         dev_err(ctrl->device,
                                 "keep-alive support is mandatory for fabrics\n");
                         ret = -EINVAL;
+                        goto out_free;
                 }
         } else {
                 ctrl->cntlid = le16_to_cpu(id->cntlid);
@@ -1856,11 +1860,20 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
         else if (!ctrl->apst_enabled && prev_apst_enabled)
                 dev_pm_qos_hide_latency_tolerance(ctrl->device);
 
-        nvme_configure_apst(ctrl);
-        nvme_configure_directives(ctrl);
+        ret = nvme_configure_apst(ctrl);
+        if (ret < 0)
+                return ret;
+
+        ret = nvme_configure_directives(ctrl);
+        if (ret < 0)
+                return ret;
 
         ctrl->identified = true;
 
-        return ret;
+        return 0;
+
+out_free:
+        kfree(id);
+        return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_init_identify);
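The core.c hunks above all serve one pattern: nvme_configure_apst() and nvme_configure_directives() now return int, and nvme_init_identify() stops on the first failure instead of silently dropping it. A condensed sketch of that propagation pattern (illustrative helpers, not the real functions):

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins for nvme_configure_apst()/_directives(). */
static int configure_apst(int fail)       { return fail ? -EIO : 0; }
static int configure_directives(int fail) { return fail ? -EIO : 0; }

/* Each step's result is now checked and returned, replacing the old
 * void calls whose failures were ignored. */
static int init_identify(int fail_apst, int fail_dir)
{
        int ret;

        ret = configure_apst(fail_apst);
        if (ret < 0)
                return ret;

        ret = configure_directives(fail_dir);
        if (ret < 0)
                return ret;

        return 0;
}

int main(void)
{
        printf("ok: %d, apst fails: %d\n",
               init_identify(0, 0), init_identify(1, 0));
        return 0;
}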
@@ -2004,9 +2017,11 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
         if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
                 return sprintf(buf, "eui.%8phN\n", ns->eui);
 
-        while (ctrl->serial[serial_len - 1] == ' ')
+        while (serial_len > 0 && (ctrl->serial[serial_len - 1] == ' ' ||
+                                  ctrl->serial[serial_len - 1] == '\0'))
                 serial_len--;
-        while (ctrl->model[model_len - 1] == ' ')
+        while (model_len > 0 && (ctrl->model[model_len - 1] == ' ' ||
+                                 ctrl->model[model_len - 1] == '\0'))
                 model_len--;
 
         return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
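The wwid fix bounds the trim loops and also strips NUL bytes, since some controllers pad the fixed-width serial/model fields with 0x00 instead of spaces; an all-padding field previously underflowed the index. A self-contained sketch of the bounded trim:

#include <stdio.h>

/* Trim trailing spaces and NUL padding from a fixed-width, not
 * necessarily NUL-terminated field; never underflows on all-padding. */
static int trimmed_len(const char *field, int len)
{
        while (len > 0 && (field[len - 1] == ' ' || field[len - 1] == '\0'))
                len--;
        return len;
}

int main(void)
{
        char serial[8] = { 'S', 'N', '0', '1', ' ', ' ', '\0', '\0' };

        printf("trimmed length: %d\n",
               trimmed_len(serial, (int)sizeof(serial)));
        /* prints 4: both the spaces and the NUL padding are stripped */
        return 0;
}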
drivers/nvme/host/pci.c
@@ -1558,11 +1558,9 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
         if (dev->cmb) {
                 iounmap(dev->cmb);
                 dev->cmb = NULL;
-                if (dev->cmbsz) {
-                        sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
-                                                     &dev_attr_cmb.attr, NULL);
-                        dev->cmbsz = 0;
-                }
+                sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
+                                             &dev_attr_cmb.attr, NULL);
+                dev->cmbsz = 0;
         }
 }
 
@@ -1953,16 +1951,14 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 
         /*
          * CMBs can currently only exist on >=1.2 PCIe devices. We only
-         * populate sysfs if a CMB is implemented. Note that we add the
-         * CMB attribute to the nvme_ctrl kobj which removes the need to remove
-         * it on exit. Since nvme_dev_attrs_group has no name we can pass
-         * NULL as final argument to sysfs_add_file_to_group.
+         * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group
+         * has no name we can pass NULL as final argument to
+         * sysfs_add_file_to_group.
          */
 
         if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
                 dev->cmb = nvme_map_cmb(dev);
-
-                if (dev->cmbsz) {
+                if (dev->cmb) {
                         if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
                                                     &dev_attr_cmb.attr, NULL))
                                 dev_warn(dev->ctrl.device,
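Both pci.c hunks key the sysfs attribute's add and remove to the same state, the CMB mapping (dev->cmb), instead of the cmbsz register value, so a reset cycle cannot leave a stale or duplicate file. A toy model of the symmetry being restored (assumed simplification, not the sysfs API):

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the state the fix keeps consistent across resets. */
struct cmb_state {
        bool mapped;            /* dev->cmb                   */
        bool sysfs_exposed;     /* the cmb attribute file     */
};

/* Expose the file only when the mapping actually succeeded. */
static void cmb_enable(struct cmb_state *s, bool map_ok)
{
        s->mapped = map_ok;
        if (s->mapped)
                s->sysfs_exposed = true;  /* sysfs_add_file_to_group()    */
}

/* Release keys off the same condition, so add/remove stay balanced. */
static void cmb_release(struct cmb_state *s)
{
        if (!s->mapped)
                return;
        s->mapped = false;
        s->sysfs_exposed = false;         /* sysfs_remove_file_from_group() */
}

int main(void)
{
        struct cmb_state s = { false, false };

        cmb_enable(&s, true);
        cmb_release(&s);          /* reset path */
        cmb_enable(&s, true);     /* re-enable: file re-added exactly once */
        printf("mapped=%d exposed=%d\n", s.mapped, s.sysfs_exposed);
        return 0;
}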
drivers/nvme/target/fc.c
@@ -114,6 +114,11 @@ struct nvmet_fc_tgtport {
         struct kref                     ref;
 };
 
+struct nvmet_fc_defer_fcp_req {
+        struct list_head                req_list;
+        struct nvmefc_tgt_fcp_req       *fcp_req;
+};
+
 struct nvmet_fc_tgt_queue {
         bool                            ninetypercent;
         u16                             qid;
@@ -132,6 +137,8 @@ struct nvmet_fc_tgt_queue {
         struct nvmet_fc_tgt_assoc       *assoc;
         struct nvmet_fc_fcp_iod         *fod;           /* array of fcp_iods */
         struct list_head                fod_list;
+        struct list_head                pending_cmd_list;
+        struct list_head                avail_defer_list;
         struct workqueue_struct         *work_q;
         struct kref                     ref;
 } __aligned(sizeof(unsigned long long));
@@ -223,6 +230,8 @@ static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
+static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+                                        struct nvmet_fc_fcp_iod *fod);
 
 
 /* *********************** FC-NVME DMA Handling **************************** */
@@ -463,9 +472,9 @@ static struct nvmet_fc_fcp_iod *
 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
 {
         struct nvmet_fc_fcp_iod *fod;
-        unsigned long flags;
 
-        spin_lock_irqsave(&queue->qlock, flags);
+        lockdep_assert_held(&queue->qlock);
+
         fod = list_first_entry_or_null(&queue->fod_list,
                         struct nvmet_fc_fcp_iod, fcp_list);
         if (fod) {
@@ -477,17 +486,37 @@ nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
                  * will "inherit" that reference.
                  */
         }
-        spin_unlock_irqrestore(&queue->qlock, flags);
         return fod;
 }
 
+static void
+nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
+                       struct nvmet_fc_tgt_queue *queue,
+                       struct nvmefc_tgt_fcp_req *fcpreq)
+{
+        struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
+
+        /*
+         * put all admin cmds on hw queue id 0. All io commands go to
+         * the respective hw queue based on a modulo basis
+         */
+        fcpreq->hwqid = queue->qid ?
+                        ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
+
+        if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
+                queue_work_on(queue->cpu, queue->work_q, &fod->work);
+        else
+                nvmet_fc_handle_fcp_rqst(tgtport, fod);
+}
+
 static void
 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
                         struct nvmet_fc_fcp_iod *fod)
 {
         struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
         struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+        struct nvmet_fc_defer_fcp_req *deferfcp;
         unsigned long flags;
 
         fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
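The new nvmet_fc_queue_fcp_req() helper centralizes the queue-to-hardware-queue mapping: the admin queue (qid 0) always uses hw queue 0, and I/O queue N maps to (N - 1) mod max_hw_queues. A standalone sketch of that mapping:

#include <stdio.h>

/* Admin queue (qid 0) -> hw queue 0; I/O queue N -> (N-1) mod nr_hw. */
static unsigned int qid_to_hwqid(unsigned int qid, unsigned int max_hw_queues)
{
        return qid ? (qid - 1) % max_hw_queues : 0;
}

int main(void)
{
        unsigned int qid;

        for (qid = 0; qid <= 6; qid++)
                printf("qid %u -> hwqid %u\n", qid, qid_to_hwqid(qid, 4));
        /* qid 0->0 (admin), 1->0, 2->1, 3->2, 4->3, 5->0, 6->1 */
        return 0;
}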
@@ -495,21 +524,56 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
 
         fcpreq->nvmet_fc_private = NULL;
 
-        spin_lock_irqsave(&queue->qlock, flags);
-        list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
         fod->active = false;
         fod->abort = false;
         fod->aborted = false;
         fod->writedataactive = false;
         fod->fcpreq = NULL;
-        spin_unlock_irqrestore(&queue->qlock, flags);
-
-        /*
-         * release the reference taken at queue lookup and fod allocation
-         */
-        nvmet_fc_tgt_q_put(queue);
 
         tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
 
+        spin_lock_irqsave(&queue->qlock, flags);
+        deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
+                                struct nvmet_fc_defer_fcp_req, req_list);
+        if (!deferfcp) {
+                list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
+                spin_unlock_irqrestore(&queue->qlock, flags);
+
+                /* Release reference taken at queue lookup and fod allocation */
+                nvmet_fc_tgt_q_put(queue);
+                return;
+        }
+
+        /* Re-use the fod for the next pending cmd that was deferred */
+        list_del(&deferfcp->req_list);
+
+        fcpreq = deferfcp->fcp_req;
+
+        /* deferfcp can be reused for another IO at a later date */
+        list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
+
+        spin_unlock_irqrestore(&queue->qlock, flags);
+
+        /* Save NVME CMD IO in fod */
+        memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
+
+        /* Setup new fcpreq to be processed */
+        fcpreq->rspaddr = NULL;
+        fcpreq->rsplen = 0;
+        fcpreq->nvmet_fc_private = fod;
+        fod->fcpreq = fcpreq;
+        fod->active = true;
+
+        /* inform LLDD IO is now being processed */
+        tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
+
+        /* Submit deferred IO for processing */
+        nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
+
+        /*
+         * Leave the queue lookup get reference taken when
+         * fod was originally allocated.
+         */
 }
 
 static int
@@ -569,6 +633,8 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
         queue->port = assoc->tgtport->port;
         queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
         INIT_LIST_HEAD(&queue->fod_list);
+        INIT_LIST_HEAD(&queue->avail_defer_list);
+        INIT_LIST_HEAD(&queue->pending_cmd_list);
         atomic_set(&queue->connected, 0);
         atomic_set(&queue->sqtail, 0);
         atomic_set(&queue->rsn, 1);
@@ -638,6 +704,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
 {
         struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
         struct nvmet_fc_fcp_iod *fod = queue->fod;
+        struct nvmet_fc_defer_fcp_req *deferfcp;
         unsigned long flags;
         int i, writedataactive;
         bool disconnect;
@@ -666,6 +733,35 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
                         }
                 }
         }
+
+        /* Cleanup defer'ed IOs in queue */
+        list_for_each_entry(deferfcp, &queue->avail_defer_list, req_list) {
+                list_del(&deferfcp->req_list);
+                kfree(deferfcp);
+        }
+
+        for (;;) {
+                deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
+                                struct nvmet_fc_defer_fcp_req, req_list);
+                if (!deferfcp)
+                        break;
+
+                list_del(&deferfcp->req_list);
+                spin_unlock_irqrestore(&queue->qlock, flags);
+
+                tgtport->ops->defer_rcv(&tgtport->fc_target_port,
+                                deferfcp->fcp_req);
+
+                tgtport->ops->fcp_abort(&tgtport->fc_target_port,
+                                deferfcp->fcp_req);
+
+                tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
+                                deferfcp->fcp_req);
+
+                kfree(deferfcp);
+
+                spin_lock_irqsave(&queue->qlock, flags);
+        }
         spin_unlock_irqrestore(&queue->qlock, flags);
 
         flush_workqueue(queue->work_q);
@@ -2172,11 +2268,38 @@ nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
  * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
  * layer for processing.
  *
- * The nvmet-fc layer will copy cmd payload to an internal structure for
- * processing. As such, upon completion of the routine, the LLDD may
- * immediately free/reuse the CMD IU buffer passed in the call.
+ * The nvmet_fc layer allocates a local job structure (struct
+ * nvmet_fc_fcp_iod) from the queue for the io and copies the
+ * CMD IU buffer to the job structure. As such, on a successful
+ * completion (returns 0), the LLDD may immediately free/reuse
+ * the CMD IU buffer passed in the call.
  *
- * If this routine returns error, the lldd should abort the exchange.
+ * However, in some circumstances, due to the packetized nature of FC
+ * and the api of the FC LLDD which may issue a hw command to send the
+ * response, but the LLDD may not get the hw completion for that command
+ * and upcall the nvmet_fc layer before a new command may be
+ * asynchronously received - its possible for a command to be received
+ * before the LLDD and nvmet_fc have recycled the job structure. It gives
+ * the appearance of more commands received than fits in the sq.
+ * To alleviate this scenario, a temporary queue is maintained in the
+ * transport for pending LLDD requests waiting for a queue job structure.
+ * In these "overrun" cases, a temporary queue element is allocated
+ * the LLDD request and CMD iu buffer information remembered, and the
+ * routine returns a -EOVERFLOW status. Subsequently, when a queue job
+ * structure is freed, it is immediately reallocated for anything on the
+ * pending request list. The LLDDs defer_rcv() callback is called,
+ * informing the LLDD that it may reuse the CMD IU buffer, and the io
+ * is then started normally with the transport.
+ *
+ * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
+ * the completion as successful but must not reuse the CMD IU buffer
+ * until the LLDD's defer_rcv() callback has been called for the
+ * corresponding struct nvmefc_tgt_fcp_req pointer.
+ *
+ * If there is any other condition in which an error occurs, the
+ * transport will return a non-zero status indicating the error.
+ * In all cases other than -EOVERFLOW, the transport has not accepted the
+ * request and the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *   was received on.
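The comment above describes the whole protocol; its two halves (receive-side deferral, free-side recycling) can be condensed into a single-threaded sketch. Everything below is illustrative: the kernel version links real list_heads under queue->qlock and services deferrals in FIFO order, where this sketch uses a simple LIFO:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define IU_LEN 8        /* illustrative CMD IU size */

/* Simplified stand-ins: a job structure and a deferral record. */
struct fod      { char cmdiubuf[IU_LEN]; struct fod *next; };
struct deferred { const char *lldd_buf;  struct deferred *next; };

static struct fod *free_fods;           /* free job structures    */
static struct deferred *pending;        /* cmds waiting for a fod */

static void defer_rcv(const char *buf)  /* LLDD may now reuse buf */
{
        printf("defer_rcv: buffer %p released\n", (const void *)buf);
}

/* Receive path: with a free fod the IU is copied at once (return 0,
 * buffer immediately reusable); otherwise only the buffer POINTER is
 * remembered and -EOVERFLOW tells the LLDD to keep the buffer intact. */
static int rcv_cmd(const char *cmdiubuf)
{
        struct fod *f = free_fods;
        struct deferred *d;

        if (f) {
                free_fods = f->next;
                memcpy(f->cmdiubuf, cmdiubuf, IU_LEN);
                return 0;
        }

        d = malloc(sizeof(*d));
        if (!d)
                return -ENOMEM;
        d->lldd_buf = cmdiubuf;
        d->next = pending;
        pending = d;
        return -EOVERFLOW;
}

/* Completion path: a freed fod is recycled for a deferred command; the
 * IU is copied out of the LLDD buffer only now, then defer_rcv() frees
 * that buffer for reuse and the io starts normally. */
static void free_fod(struct fod *f)
{
        struct deferred *d = pending;

        if (!d) {
                f->next = free_fods;
                free_fods = f;
                return;
        }
        pending = d->next;
        memcpy(f->cmdiubuf, d->lldd_buf, IU_LEN);
        defer_rcv(d->lldd_buf);
        free(d);
        /* fod "f" now carries the deferred command and is submitted */
}

int main(void)
{
        struct fod only = { .cmdiubuf = "", .next = NULL };
        char buf_a[IU_LEN] = "CMD-A", buf_b[IU_LEN] = "CMD-B";

        free_fods = &only;
        printf("rcv A -> %d\n", rcv_cmd(buf_a));  /* 0: started        */
        printf("rcv B -> %d\n", rcv_cmd(buf_b));  /* -EOVERFLOW        */
        free_fod(&only);                          /* recycles the fod  */
        return 0;
}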
@@ -2194,6 +2317,8 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
         struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
         struct nvmet_fc_tgt_queue *queue;
         struct nvmet_fc_fcp_iod *fod;
+        struct nvmet_fc_defer_fcp_req *deferfcp;
+        unsigned long flags;
 
         /* validate iu, so the connection id can be used to find the queue */
         if ((cmdiubuf_len != sizeof(*cmdiu)) ||
@@ -2214,29 +2339,60 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
          * when the fod is freed.
          */
 
+        spin_lock_irqsave(&queue->qlock, flags);
+
         fod = nvmet_fc_alloc_fcp_iod(queue);
-        if (!fod) {
+        if (fod) {
+                spin_unlock_irqrestore(&queue->qlock, flags);
+
+                fcpreq->nvmet_fc_private = fod;
+                fod->fcpreq = fcpreq;
+
+                memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
+
+                nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
+
+                return 0;
+        }
+
+        if (!tgtport->ops->defer_rcv) {
+                spin_unlock_irqrestore(&queue->qlock, flags);
                 /* release the queue lookup reference */
                 nvmet_fc_tgt_q_put(queue);
                 return -ENOENT;
         }
 
-        fcpreq->nvmet_fc_private = fod;
-        fod->fcpreq = fcpreq;
-        /*
-         * put all admin cmds on hw queue id 0. All io commands go to
-         * the respective hw queue based on a modulo basis
-         */
-        fcpreq->hwqid = queue->qid ?
-                        ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
-        memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
+        deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
+                        struct nvmet_fc_defer_fcp_req, req_list);
+        if (deferfcp) {
+                /* Just re-use one that was previously allocated */
+                list_del(&deferfcp->req_list);
+        } else {
+                spin_unlock_irqrestore(&queue->qlock, flags);
 
-        if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
-                queue_work_on(queue->cpu, queue->work_q, &fod->work);
-        else
-                nvmet_fc_handle_fcp_rqst(tgtport, fod);
+                /* Now we need to dynamically allocate one */
+                deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
+                if (!deferfcp) {
+                        /* release the queue lookup reference */
+                        nvmet_fc_tgt_q_put(queue);
+                        return -ENOMEM;
+                }
+                spin_lock_irqsave(&queue->qlock, flags);
+        }
 
-        return 0;
+        /* For now, use rspaddr / rsplen to save payload information */
+        fcpreq->rspaddr = cmdiubuf;
+        fcpreq->rsplen  = cmdiubuf_len;
+        deferfcp->fcp_req = fcpreq;
+
+        /* defer processing till a fod becomes available */
+        list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
+
+        /* NOTE: the queue lookup reference is still valid */
+
+        spin_unlock_irqrestore(&queue->qlock, flags);
+
+        return -EOVERFLOW;
 }
 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
drivers/scsi/lpfc/lpfc_attr.c
@@ -205,8 +205,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
                                 atomic_read(&tgtp->xmt_ls_rsp_error));
 
                 len += snprintf(buf+len, PAGE_SIZE-len,
-                                "FCP: Rcv %08x Release %08x Drop %08x\n",
+                                "FCP: Rcv %08x Defer %08x Release %08x "
+                                "Drop %08x\n",
                                 atomic_read(&tgtp->rcv_fcp_cmd_in),
+                                atomic_read(&tgtp->rcv_fcp_cmd_defer),
                                 atomic_read(&tgtp->xmt_fcp_release),
                                 atomic_read(&tgtp->rcv_fcp_cmd_drop));
 
drivers/scsi/lpfc/lpfc_debugfs.c
@@ -782,8 +782,11 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
                                 atomic_read(&tgtp->xmt_ls_rsp_error));
 
                 len += snprintf(buf + len, size - len,
-                                "FCP: Rcv %08x Drop %08x\n",
+                                "FCP: Rcv %08x Defer %08x Release %08x "
+                                "Drop %08x\n",
                                 atomic_read(&tgtp->rcv_fcp_cmd_in),
+                                atomic_read(&tgtp->rcv_fcp_cmd_defer),
+                                atomic_read(&tgtp->xmt_fcp_release),
                                 atomic_read(&tgtp->rcv_fcp_cmd_drop));
 
                 if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
drivers/scsi/lpfc/lpfc_nvmet.c
@@ -841,12 +841,31 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
         lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 }
 
+static void
+lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
+                     struct nvmefc_tgt_fcp_req *rsp)
+{
+        struct lpfc_nvmet_tgtport *tgtp;
+        struct lpfc_nvmet_rcv_ctx *ctxp =
+                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+        struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
+        struct lpfc_hba *phba = ctxp->phba;
+
+        lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
+                         ctxp->oxid, ctxp->size, smp_processor_id());
+
+        tgtp = phba->targetport->private;
+        atomic_inc(&tgtp->rcv_fcp_cmd_defer);
+        lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+}
+
 static struct nvmet_fc_target_template lpfc_tgttemplate = {
         .targetport_delete = lpfc_nvmet_targetport_delete,
         .xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
         .fcp_op         = lpfc_nvmet_xmt_fcp_op,
         .fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
         .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
+        .defer_rcv      = lpfc_nvmet_defer_rcv,
 
         .max_hw_queues  = 1,
         .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
@@ -1504,6 +1523,17 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
                 return;
         }
 
+        /* Processing of FCP command is deferred */
+        if (rc == -EOVERFLOW) {
+                lpfc_nvmeio_data(phba,
+                                 "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
+                                 oxid, size, sid);
+                /* defer reposting rcv buffer till .defer_rcv callback */
+                ctxp->rqb_buffer = nvmebuf;
+                atomic_inc(&tgtp->rcv_fcp_cmd_out);
+                return;
+        }
+
         atomic_inc(&tgtp->rcv_fcp_cmd_drop);
         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                         "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",

drivers/scsi/lpfc/lpfc_nvmet.h
@@ -49,6 +49,7 @@ struct lpfc_nvmet_tgtport {
         atomic_t rcv_fcp_cmd_in;
         atomic_t rcv_fcp_cmd_out;
         atomic_t rcv_fcp_cmd_drop;
+        atomic_t rcv_fcp_cmd_defer;
         atomic_t xmt_fcp_release;
 
         /* Stats counters - lpfc_nvmet_xmt_fcp_op */
include/linux/nvme-fc-driver.h
@@ -346,6 +346,11 @@ struct nvme_fc_remote_port {
  *       indicating an FC transport Aborted status.
  *       Entrypoint is Mandatory.
  *
+ * @defer_rcv:  Called by the transport to signal the LLDD that it has
+ *       begun processing of a previously received NVME CMD IU. The
+ *       LLDD is now free to re-use the rcv buffer associated with the
+ *       nvmefc_tgt_fcp_req.
+ *
  * @max_hw_queues:  indicates the maximum number of hw queues the LLDD
  *       supports for cpu affinitization.
  *       Value is Mandatory. Must be at least 1.
@@ -846,6 +851,8 @@ struct nvmet_fc_target_template {
                         struct nvmefc_tgt_fcp_req *fcpreq);
         void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,
                         struct nvmefc_tgt_fcp_req *fcpreq);
+        void (*defer_rcv)(struct nvmet_fc_target_port *tgtport,
+                        struct nvmefc_tgt_fcp_req *fcpreq);
 
         u32 max_hw_queues;
         u16 max_sgl_segments;