Merge branch 'for-4.12/post-merge' of git://git.kernel.dk/linux-block
Pull second round of block layer updates from Jens Axboe:

 - Further fixups to the NVMe APST code, from Andy.

 - Various fixes for (mostly) nvme-fc, from Christoph and James.

 - NVMe scsi fixes from Jon and Christoph.

* 'for-4.12/post-merge' of git://git.kernel.dk/linux-block: (39 commits)
  nvme-scsi: remove nvme_trans_security_protocol
  nvme-lightnvm: add missing endianess conversion in nvme_nvm_end_io
  nvme-scsi: Consider LBA format in IO splitting calculation
  nvme-fc: avoid memory corruption caused by calling nvmf_free_options() twice
  lpfc: Fix memory corruption of the lpfc_ncmd->list pointers
  nvme: Add nvme_core.force_apst to ignore the NO_APST quirk
  nvme: Display raw APST configuration via DYNAMIC_DEBUG
  nvme: Fix APST comment
  lpfc revison 11.2.0.12
  Fix Express lane queue creation.
  Update ABORT processing for NVMET.
  Fix implicit logo and RSCN handling for NVMET
  Add Fabric assigned WWN support.
  Fix max_sgl_segments settings for NVME / NVMET
  Fix crash after issuing lip reset
  Fix driver load issues when MRQ=8
  Remove hba lock from NVMET issue WQE.
  Fix nvme initiator handling when not enabled.
  Fix driver usage of 128B WQEs when WQ_CREATE is V1.
  Fix driver unload/reload operation.
  ...
commit 08c521a201
@@ -61,6 +61,10 @@ module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
                 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static LIST_HEAD(nvme_ctrl_list);
static DEFINE_SPINLOCK(dev_list_lock);

@@ -1325,7 +1329,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
     * heuristic: we are willing to spend at most 2% of the time
     * transitioning between power states. Therefore, when running
     * in any given state, we will enter the next lower-power
     * non-operational state after waiting 100 * (enlat + exlat)
     * non-operational state after waiting 50 * (enlat + exlat)
     * microseconds, as long as that state's total latency is under
     * the requested maximum latency.
     *
@@ -1336,6 +1340,8 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)

    unsigned apste;
    struct nvme_feat_auto_pst *table;
    u64 max_lat_us = 0;
    int max_ps = -1;
    int ret;

    /*
@@ -1357,6 +1363,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
    if (ctrl->ps_max_latency_us == 0) {
        /* Turn off APST. */
        apste = 0;
        dev_dbg(ctrl->device, "APST disabled\n");
    } else {
        __le64 target = cpu_to_le64(0);
        int state;
@@ -1406,9 +1413,22 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)

            target = cpu_to_le64((state << 3) |
                                 (transition_ms << 8));

            if (max_ps == -1)
                max_ps = state;

            if (total_latency_us > max_lat_us)
                max_lat_us = total_latency_us;
        }

        apste = 1;

        if (max_ps == -1) {
            dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
        } else {
            dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
                    max_ps, max_lat_us, (int)sizeof(*table), table);
        }
    }

    ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
@@ -1546,6 +1566,11 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
        }
    }

    if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
        dev_warn(ctrl->dev, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
        ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
    }

    ctrl->oacs = le16_to_cpu(id->oacs);
    ctrl->vid = le16_to_cpu(id->vid);
    ctrl->oncs = le16_to_cpup(&id->oncs);
@@ -1568,7 +1593,16 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)

    ctrl->npss = id->npss;
    prev_apsta = ctrl->apsta;
    ctrl->apsta = (ctrl->quirks & NVME_QUIRK_NO_APST) ? 0 : id->apsta;
    if (ctrl->quirks & NVME_QUIRK_NO_APST) {
        if (force_apst && id->apsta) {
            dev_warn(ctrl->dev, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
            ctrl->apsta = 1;
        } else {
            ctrl->apsta = 0;
        }
    } else {
        ctrl->apsta = id->apsta;
    }
    memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));

    if (ctrl->ops->is_fabrics) {
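For reference, the entry packing visible above — target = cpu_to_le64((state << 3) | (transition_ms << 8)) — follows the NVMe APST table layout: the destination power state sits in bits 3:7 and the idle time before transition, in milliseconds, in bits 8:31. The following is an illustrative sketch only, not the driver code; apst_entry() is a hypothetical helper, and the 50x factor is taken from the corrected comment in the hunk above (2% transition overhead means waiting 50 * (enlat + exlat) microseconds).

/* Illustrative sketch: packing one APST table entry (host order;
 * the driver additionally wraps the result in cpu_to_le64()). */
#include <stdint.h>

static uint64_t apst_entry(unsigned state, uint64_t enlat_us, uint64_t exlat_us)
{
    uint64_t total_latency_us = enlat_us + exlat_us;
    /* 2% overhead heuristic: idle for 50x the round-trip latency,
     * expressed in milliseconds for the table field */
    uint64_t transition_ms = (total_latency_us * 50) / 1000;

    /* state in bits 3:7, idle time (ms) in bits 8:31 */
    return ((uint64_t)state << 3) | (transition_ms << 8);
}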
File diff suppressed because it is too large
@@ -483,7 +483,7 @@ static void nvme_nvm_end_io(struct request *rq, int error)
{
    struct nvm_rq *rqd = rq->end_io_data;

    rqd->ppa_status = nvme_req(rq)->result.u64;
    rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
    rqd->error = nvme_req(rq)->status;
    nvm_end_io(rqd);
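The fix above is the classic missing-endianness-conversion pattern: the NVMe completion result is little-endian on the wire, so copying it raw is only correct on little-endian hosts. A minimal stand-alone sketch of what le64_to_cpu() does (my_le64_to_cpu is a stand-in name, not the kernel helper):

#include <stdint.h>

/* Stand-in for the kernel's le64_to_cpu(): a no-op on little-endian
 * hosts, a byte swap on big-endian ones. */
static uint64_t my_le64_to_cpu(uint64_t le)
{
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return __builtin_bswap64(le);   /* big-endian host: swap bytes */
#else
    return le;                      /* little-endian host: identity */
#endif
}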
@@ -1609,7 +1609,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
    struct nvme_command c;
    u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
    u16 control;
    u32 max_blocks = queue_max_hw_sectors(ns->queue);
    u32 max_blocks = queue_max_hw_sectors(ns->queue) >> (ns->lba_shift - 9);

    num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);
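The hunk above is the "Consider LBA format in IO splitting calculation" fix: queue_max_hw_sectors() counts 512-byte sectors, but the splitting logic needs device LBAs, so the value must be scaled by the LBA size. A worked example under an assumed 4096-byte LBA format (lba_shift = 12):

#include <assert.h>

int main(void)
{
    unsigned max_hw_sectors = 256;  /* 512-byte units, example value */
    unsigned lba_shift = 12;        /* 4096-byte LBAs */
    unsigned max_blocks = max_hw_sectors >> (lba_shift - 9);

    /* 256 x 512 B = 32 x 4096 B = 128 KiB either way */
    assert(max_blocks == 32);
    return 0;
}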
@@ -2138,15 +2138,6 @@ static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
    return res;
}

static int nvme_trans_security_protocol(struct nvme_ns *ns,
                                        struct sg_io_hdr *hdr,
                                        u8 *cmd)
{
    return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
                                 ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
                                 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
}

static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
                                        struct sg_io_hdr *hdr)
{
@@ -2414,10 +2405,6 @@ static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
    case REQUEST_SENSE:
        retcode = nvme_trans_request_sense(ns, hdr, cmd);
        break;
    case SECURITY_PROTOCOL_IN:
    case SECURITY_PROTOCOL_OUT:
        retcode = nvme_trans_security_protocol(ns, hdr, cmd);
        break;
    case SYNCHRONIZE_CACHE:
        retcode = nvme_trans_synchronize_cache(ns, hdr);
        break;
@@ -119,7 +119,7 @@ struct nvmet_fc_tgt_queue {
    u16 qid;
    u16 sqsize;
    u16 ersp_ratio;
    u16 sqhd;
    __le16 sqhd;
    int cpu;
    atomic_t connected;
    atomic_t sqtail;
@@ -1058,7 +1058,7 @@ EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);


static void
nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, u32 desc_len, u8 rqst_ls_cmd)
nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
{
    struct fcnvme_ls_acc_hdr *acc = buf;

@@ -1700,7 +1700,7 @@ nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
        xfr_length != fod->total_length ||
        (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
        (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
        queue_90percent_full(fod->queue, cqe->sq_head))
        queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
        send_ersp = true;

    /* re-set the fields */
@@ -2055,7 +2055,7 @@ nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
/*
 * Actual processing routine for received FC-NVME LS Requests from the LLD
 */
void
static void
nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
                         struct nvmet_fc_fcp_iod *fod)
{
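The sqhd change above is less about the stored bytes than about type checking: __le16 is a sparse-annotated type, so any use that mixes it with host-order arithmetic without an explicit conversion is flagged by "make C=1" — which is how omissions like the queue_90percent_full() argument fixed above get caught. A minimal sketch of the annotation pattern (example_queue/example_sqhd are illustrative names, not driver symbols):

#include <linux/types.h>

struct example_queue {
    __le16 sqhd;                    /* wire order, as in the hunk above */
};

static inline u16 example_sqhd(const struct example_queue *q)
{
    /* explicit conversion: correct on all hosts, and sparse-clean */
    return le16_to_cpu(q->sqhd);
}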
@@ -666,7 +666,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
#define FCLOOP_SGL_SEGS 256
#define FCLOOP_DMABOUND_4G 0xFFFFFFFF

struct nvme_fc_port_template fctemplate = {
static struct nvme_fc_port_template fctemplate = {
    .localport_delete = fcloop_localport_delete,
    .remoteport_delete = fcloop_remoteport_delete,
    .create_queue = fcloop_create_queue,
@@ -686,7 +686,7 @@ struct nvme_fc_port_template fctemplate = {
    .fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
};

struct nvmet_fc_target_template tgttemplate = {
static struct nvmet_fc_target_template tgttemplate = {
    .targetport_delete = fcloop_targetport_delete,
    .xmt_ls_rsp = fcloop_xmt_ls_rsp,
    .fcp_op = fcloop_fcp_op,
@@ -56,7 +56,7 @@ struct lpfc_sli2_slim;
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */
#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */
#define LPFC_MIN_NVME_SEG_CNT 254
#define LPFC_MAX_NVME_SEG_CNT 128 /* max SGL element cnt per NVME cmnd */

#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
@@ -474,6 +474,8 @@ struct lpfc_vport {
    unsigned long rcv_buffer_time_stamp;
    uint32_t vport_flag;
#define STATIC_VPORT 1
#define FAWWPN_SET 2
#define FAWWPN_PARAM_CHG 4

    uint16_t fdmi_num_disc;
    uint32_t fdmi_hba_mask;
@@ -781,6 +783,7 @@ struct lpfc_hba {
    uint32_t cfg_nvmet_fb_size;
    uint32_t cfg_total_seg_cnt;
    uint32_t cfg_sg_seg_cnt;
    uint32_t cfg_nvme_seg_cnt;
    uint32_t cfg_sg_dma_buf_size;
    uint64_t cfg_soft_wwnn;
    uint64_t cfg_soft_wwpn;
@@ -2292,6 +2292,8 @@ lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
    struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
    struct lpfc_hba *phba = vport->phba;
    unsigned int cnt = count;
    uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
    u32 *fawwpn_key = (uint32_t *)&vport->fc_sparam.un.vendorVersion[0];

    /*
     * We're doing a simple sanity check for soft_wwpn setting.
@@ -2305,6 +2307,12 @@ lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
     * here. The intent is to protect against the random user or
     * application that is just writing attributes.
     */
    if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0051 "LPFC_DRIVER_NAME" soft wwpn can not"
                        " be enabled: fawwpn is enabled\n");
        return -EINVAL;
    }

    /* count may include a LF at end of string */
    if (buf[cnt-1] == '\n')
@@ -3335,7 +3343,7 @@ LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP,
 * percentage will go to NVME.
 */
LPFC_ATTR_R(xri_split, 50, 10, 90,
            "Division of XRI resources between SCSI and NVME");
            "Division of XRI resources between SCSI and NVME");

/*
# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
@@ -2486,6 +2486,10 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
                           mbox, *rpi);
    else {
        *rpi = lpfc_sli4_alloc_rpi(phba);
        if (*rpi == LPFC_RPI_ALLOC_ERROR) {
            mempool_free(mbox, phba->mbox_mem_pool);
            return -EBUSY;
        }
        status = lpfc_reg_rpi(phba, phba->pport->vpi,
                              phba->pport->fc_myDID,
                              (uint8_t *)&phba->pport->fc_sparam,
@@ -24,6 +24,7 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *);

struct fc_rport;
struct fc_frame_header;
struct lpfc_nvmet_rcv_ctx;
void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli_read_link_ste(struct lpfc_hba *);
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t);
@@ -99,7 +100,7 @@ void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *);

int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
                        struct lpfc_iocbq *, struct lpfc_nodelist *);
void lpfc_nlp_init(struct lpfc_vport *, struct lpfc_nodelist *, uint32_t);
struct lpfc_nodelist *lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did);
struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *);
int lpfc_nlp_put(struct lpfc_nodelist *);
int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp);
@@ -245,6 +246,10 @@ struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba);
void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab);
void lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
                        struct lpfc_dmabuf *mp);
int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
                               struct fc_frame_header *fc_hdr);
void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
                                     uint16_t);
int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
@@ -302,6 +307,8 @@ int lpfc_sli_check_eratt(struct lpfc_hba *);
void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
                                     struct lpfc_sli_ring *, uint32_t);
void lpfc_sli4_handle_received_buffer(struct lpfc_hba *, struct hbq_dmabuf *);
void lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
                             struct fc_frame_header *fc_hdr, bool aborted);
void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
@@ -537,19 +537,53 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
    }
}

static void
lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
{
    struct lpfc_hba *phba = vport->phba;
    struct lpfc_nodelist *ndlp = NULL;
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

    /*
     * To conserve rpi's, filter out addresses for other
     * vports on the same physical HBAs.
     */
    if (Did != vport->fc_myDID &&
        (!lpfc_find_vport_by_did(phba, Did) ||
         vport->cfg_peer_port_login)) {
        if (!phba->nvmet_support) {
            /* FCPI/NVMEI path. Process Did */
            lpfc_prep_node_fc4type(vport, Did, fc4_type);
            return;
        }
        /* NVMET path. NVMET only cares about NVMEI nodes. */
        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
            if (ndlp->nlp_type != NLP_NVME_INITIATOR ||
                ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
                continue;
            spin_lock_irq(shost->host_lock);
            if (ndlp->nlp_DID == Did)
                ndlp->nlp_flag &= ~NLP_NVMET_RECOV;
            else
                ndlp->nlp_flag |= NLP_NVMET_RECOV;
            spin_unlock_irq(shost->host_lock);
        }
    }
}

static int
lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type,
            uint32_t Size)
{
    struct lpfc_hba *phba = vport->phba;
    struct lpfc_sli_ct_request *Response =
        (struct lpfc_sli_ct_request *) mp->virt;
    struct lpfc_nodelist *ndlp = NULL;
    struct lpfc_dmabuf *mlast, *next_mp;
    uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType;
    uint32_t Did, CTentry;
    int Cnt;
    struct list_head head;
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
    struct lpfc_nodelist *ndlp = NULL;

    lpfc_set_disctmo(vport);
    vport->num_disc_nodes = 0;
@@ -574,19 +608,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type,
            /* Get next DID from NameServer List */
            CTentry = *ctptr++;
            Did = ((be32_to_cpu(CTentry)) & Mask_DID);

            ndlp = NULL;

            /*
             * Check for rscn processing or not
             * To conserve rpi's, filter out addresses for other
             * vports on the same physical HBAs.
             */
            if ((Did != vport->fc_myDID) &&
                ((lpfc_find_vport_by_did(phba, Did) == NULL) ||
                 vport->cfg_peer_port_login))
                lpfc_prep_node_fc4type(vport, Did, fc4_type);

            lpfc_ns_rsp_audit_did(vport, Did, fc4_type);
            if (CTentry & (cpu_to_be32(SLI_CT_LAST_ENTRY)))
                goto nsout1;

@@ -596,6 +618,22 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type,

    }

    /* All GID_FT entries processed. If the driver is running in
     * in target mode, put impacted nodes into recovery and drop
     * the RPI to flush outstanding IO.
     */
    if (vport->phba->nvmet_support) {
        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
            if (!(ndlp->nlp_flag & NLP_NVMET_RECOV))
                continue;
            lpfc_disc_state_machine(vport, ndlp, NULL,
                                    NLP_EVT_DEVICE_RECOVERY);
            spin_lock_irq(shost->host_lock);
            ndlp->nlp_flag &= ~NLP_NVMET_RECOV;
            spin_lock_irq(shost->host_lock);
        }
    }

nsout1:
    list_del(&head);
    return 0;
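The audit logic added above is, in intent, a mark-and-sweep pass: NVME-initiator nodes get flagged NLP_NVMET_RECOV while the GID_FT response is walked, nodes whose DID still appears in the response are unflagged, and whatever remains flagged after the loop is pushed into recovery. A simplified stand-alone sketch of that intended effect (not the driver code, which interleaves the marking per response entry):

#include <stdbool.h>
#include <stddef.h>

struct node { unsigned did; bool needs_recovery; };

static void audit(struct node *nodes, size_t n,
                  const unsigned *alive_dids, size_t n_alive)
{
    for (size_t i = 0; i < n; i++)
        nodes[i].needs_recovery = true;        /* mark every tracked node */

    for (size_t a = 0; a < n_alive; a++)       /* sweep the name-server reply */
        for (size_t i = 0; i < n; i++)
            if (nodes[i].did == alive_dids[a])
                nodes[i].needs_recovery = false;

    /* anything still marked was absent from GID_FT: recover it */
}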
@@ -745,73 +745,102 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
{
    struct lpfc_hba *phba = vport->phba;
    struct lpfc_nvmet_tgtport *tgtp;
    struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
    int len = 0;
    int cnt;

    if (phba->nvmet_support) {
        if (!phba->targetport)
            return len;
        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        len += snprintf(buf+len, size-len,
        len += snprintf(buf + len, size - len,
                        "\nNVME Targetport Statistics\n");

        len += snprintf(buf+len, size-len,
        len += snprintf(buf + len, size - len,
                        "LS: Rcv %08x Drop %08x Abort %08x\n",
                        atomic_read(&tgtp->rcv_ls_req_in),
                        atomic_read(&tgtp->rcv_ls_req_drop),
                        atomic_read(&tgtp->xmt_ls_abort));
        if (atomic_read(&tgtp->rcv_ls_req_in) !=
            atomic_read(&tgtp->rcv_ls_req_out)) {
            len += snprintf(buf+len, size-len,
            len += snprintf(buf + len, size - len,
                            "Rcv LS: in %08x != out %08x\n",
                            atomic_read(&tgtp->rcv_ls_req_in),
                            atomic_read(&tgtp->rcv_ls_req_out));
        }

        len += snprintf(buf+len, size-len,
        len += snprintf(buf + len, size - len,
                        "LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n",
                        atomic_read(&tgtp->xmt_ls_rsp),
                        atomic_read(&tgtp->xmt_ls_drop),
                        atomic_read(&tgtp->xmt_ls_rsp_cmpl),
                        atomic_read(&tgtp->xmt_ls_rsp_error));

        len += snprintf(buf+len, size-len,
        len += snprintf(buf + len, size - len,
                        "FCP: Rcv %08x Drop %08x\n",
                        atomic_read(&tgtp->rcv_fcp_cmd_in),
                        atomic_read(&tgtp->rcv_fcp_cmd_drop));

        if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
            atomic_read(&tgtp->rcv_fcp_cmd_out)) {
            len += snprintf(buf+len, size-len,
            len += snprintf(buf + len, size - len,
                            "Rcv FCP: in %08x != out %08x\n",
                            atomic_read(&tgtp->rcv_fcp_cmd_in),
                            atomic_read(&tgtp->rcv_fcp_cmd_out));
        }

        len += snprintf(buf+len, size-len,
                        "FCP Rsp: read %08x readrsp %08x write %08x rsp %08x\n",
        len += snprintf(buf + len, size - len,
                        "FCP Rsp: read %08x readrsp %08x "
                        "write %08x rsp %08x\n",
                        atomic_read(&tgtp->xmt_fcp_read),
                        atomic_read(&tgtp->xmt_fcp_read_rsp),
                        atomic_read(&tgtp->xmt_fcp_write),
                        atomic_read(&tgtp->xmt_fcp_rsp));

        len += snprintf(buf+len, size-len,
        len += snprintf(buf + len, size - len,
                        "FCP Rsp: abort %08x drop %08x\n",
                        atomic_read(&tgtp->xmt_fcp_abort),
                        atomic_read(&tgtp->xmt_fcp_drop));

        len += snprintf(buf+len, size-len,
        len += snprintf(buf + len, size - len,
                        "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
                        atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
                        atomic_read(&tgtp->xmt_fcp_rsp_error),
                        atomic_read(&tgtp->xmt_fcp_rsp_drop));

        len += snprintf(buf+len, size-len,
        len += snprintf(buf + len, size - len,
                        "ABORT: Xmt %08x Err %08x Cmpl %08x",
                        atomic_read(&tgtp->xmt_abort_rsp),
                        atomic_read(&tgtp->xmt_abort_rsp_error),
                        atomic_read(&tgtp->xmt_abort_cmpl));

        len += snprintf(buf+len, size-len, "\n");
        len += snprintf(buf + len, size - len, "\n");

        cnt = 0;
        spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
        list_for_each_entry_safe(ctxp, next_ctxp,
                                 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
                                 list) {
            cnt++;
        }
        spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
        if (cnt) {
            len += snprintf(buf + len, size - len,
                            "ABORT: %d ctx entries\n", cnt);
            spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
            list_for_each_entry_safe(ctxp, next_ctxp,
                                     &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
                                     list) {
                if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ))
                    break;
                len += snprintf(buf + len, size - len,
                                "Entry: oxid %x state %x "
                                "flag %x\n",
                                ctxp->oxid, ctxp->state,
                                ctxp->flag);
            }
            spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
        }
    } else {
        if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
            return len;
@@ -3128,8 +3157,6 @@ __lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp,
                    datqp->queue_id, datqp->entry_count,
                    datqp->entry_size, datqp->host_index,
                    datqp->hba_index);
    len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");

    return len;
}

@@ -5700,10 +5727,8 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
    struct lpfc_hba *phba = vport->phba;

    if (vport->disc_trc) {
        kfree(vport->disc_trc);
        vport->disc_trc = NULL;
    }
    kfree(vport->disc_trc);
    vport->disc_trc = NULL;

    debugfs_remove(vport->debug_disc_trc); /* discovery_trace */
    vport->debug_disc_trc = NULL;
@@ -5770,10 +5795,8 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
    debugfs_remove(phba->debug_readRef); /* readRef */
    phba->debug_readRef = NULL;

    if (phba->slow_ring_trc) {
        kfree(phba->slow_ring_trc);
        phba->slow_ring_trc = NULL;
    }
    kfree(phba->slow_ring_trc);
    phba->slow_ring_trc = NULL;

    /* slow_ring_trace */
    debugfs_remove(phba->debug_slow_ring_trc);
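The debugfs hunks above repeatedly use the snprintf accumulation idiom: each call writes at the current offset (buf + len) with only the remaining capacity (size - len), so successive appends stay inside the buffer. A minimal user-space sketch of the pattern:

#include <stdio.h>

static int fill(char *buf, int size)
{
    int len = 0;

    /* each append starts where the previous one ended */
    len += snprintf(buf + len, size - len, "LS: Rcv %08x\n", 1u);
    len += snprintf(buf + len, size - len, "FCP: Rcv %08x\n", 2u);
    return len;   /* bytes the caller should expose */
}

One caveat worth noting: snprintf() returns the would-be length on truncation, so len can exceed size and make size - len wrap; the kernel's scnprintf() clamps the return value, which is why later kernels migrated such code to it.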
@@ -157,6 +157,7 @@ struct lpfc_node_rrq {
#define NLP_LOGO_SND 0x00000100 /* sent LOGO request for this entry */
#define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */
#define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */
#define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery. */
#define NLP_DEFER_RM 0x00010000 /* Remove this ndlp if no longer used */
#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */
#define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */
@@ -603,9 +603,11 @@ lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
        memcmp(&vport->fabric_portname, &sp->portName,
               sizeof(struct lpfc_name)) ||
        memcmp(&vport->fabric_nodename, &sp->nodeName,
               sizeof(struct lpfc_name)))
               sizeof(struct lpfc_name)) ||
        (vport->vport_flag & FAWWPN_PARAM_CHG)) {
        fabric_param_changed = 1;

        vport->vport_flag &= ~FAWWPN_PARAM_CHG;
    }
    /*
     * Word 1 Bit 31 in common service parameter is overloaded.
     * Word 1 Bit 31 in FLOGI request is multiple NPort request
@@ -895,10 +897,9 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
         * Cannot find existing Fabric ndlp, so allocate a
         * new one
         */
        ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
        ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
        if (!ndlp)
            goto fail;
        lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
    } else if (!NLP_CHK_NODE_ACT(ndlp)) {
        ndlp = lpfc_enable_node(vport, ndlp,
                                NLP_STE_UNUSED_NODE);
@@ -1364,7 +1365,6 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
    struct lpfc_hba *phba = vport->phba;
    struct lpfc_nodelist *ndlp;

    vport->port_state = LPFC_FLOGI;
@@ -1374,10 +1374,9 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
    ndlp = lpfc_findnode_did(vport, Fabric_DID);
    if (!ndlp) {
        /* Cannot find existing Fabric ndlp, so allocate a new one */
        ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
        ndlp = lpfc_nlp_init(vport, Fabric_DID);
        if (!ndlp)
            return 0;
        lpfc_nlp_init(vport, ndlp, Fabric_DID);
        /* Set the node type */
        ndlp->nlp_type |= NLP_FABRIC;
        /* Put ndlp onto node list */
@@ -1418,17 +1417,15 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
    struct lpfc_hba *phba = vport->phba;
    struct lpfc_nodelist *ndlp;

    /* First look for the Fabric ndlp */
    ndlp = lpfc_findnode_did(vport, Fabric_DID);
    if (!ndlp) {
        /* Cannot find existing Fabric ndlp, so allocate a new one */
        ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
        ndlp = lpfc_nlp_init(vport, Fabric_DID);
        if (!ndlp)
            return 0;
        lpfc_nlp_init(vport, ndlp, Fabric_DID);
        /* Put ndlp onto node list */
        lpfc_enqueue_node(vport, ndlp);
    } else if (!NLP_CHK_NODE_ACT(ndlp)) {
@@ -1564,14 +1561,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
                         phba->active_rrq_pool);
            return ndlp;
        }
        new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
        new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
        if (!new_ndlp) {
            if (active_rrqs_xri_bitmap)
                mempool_free(active_rrqs_xri_bitmap,
                             phba->active_rrq_pool);
            return ndlp;
        }
        lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
    } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
        rc = memcmp(&ndlp->nlp_portname, name,
                    sizeof(struct lpfc_name));
@@ -2845,10 +2841,9 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)

    ndlp = lpfc_findnode_did(vport, nportid);
    if (!ndlp) {
        ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
        ndlp = lpfc_nlp_init(vport, nportid);
        if (!ndlp)
            return 1;
        lpfc_nlp_init(vport, ndlp, nportid);
        lpfc_enqueue_node(vport, ndlp);
    } else if (!NLP_CHK_NODE_ACT(ndlp)) {
        ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
@@ -2938,10 +2933,9 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)

    ndlp = lpfc_findnode_did(vport, nportid);
    if (!ndlp) {
        ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
        ndlp = lpfc_nlp_init(vport, nportid);
        if (!ndlp)
            return 1;
        lpfc_nlp_init(vport, ndlp, nportid);
        lpfc_enqueue_node(vport, ndlp);
    } else if (!NLP_CHK_NODE_ACT(ndlp)) {
        ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
@@ -4403,7 +4397,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
    pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
    memset(pcmd, 0, cmdsize);

    *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
    *((uint32_t *)(pcmd)) = elsrspcmd;
    pcmd += sizeof(uint32_t);

    /* For PRLI, remainder of payload is PRLI parameter page */
@@ -5867,8 +5861,11 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
            (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
            !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
            continue;

        /* NVME Target mode does not do RSCN Recovery. */
        if (vport->phba->nvmet_support)
            continue;

        lpfc_disc_state_machine(vport, ndlp, NULL,
                                NLP_EVT_DEVICE_RECOVERY);
        lpfc_cancel_retry_delay_tmo(vport, ndlp);
@@ -6133,7 +6130,6 @@ int
lpfc_els_handle_rscn(struct lpfc_vport *vport)
{
    struct lpfc_nodelist *ndlp;
    struct lpfc_hba *phba = vport->phba;

    /* Ignore RSCN if the port is being torn down. */
    if (vport->load_flag & FC_UNLOADING) {
@@ -6157,22 +6153,16 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
    ndlp = lpfc_findnode_did(vport, NameServer_DID);
    if (ndlp && NLP_CHK_NODE_ACT(ndlp)
        && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
        /* Good ndlp, issue CT Request to NameServer */
        /* Good ndlp, issue CT Request to NameServer.  Need to
         * know how many gidfts were issued.  If none, then just
         * flush the RSCN.  Otherwise, the outstanding requests
         * need to complete.
         */
        vport->gidft_inp = 0;
        if (lpfc_issue_gidft(vport) == 0)
            /* Wait for NameServer query cmpl before we can
             * continue
             */
        if (lpfc_issue_gidft(vport) > 0)
            return 1;
    } else {
        /* If login to NameServer does not exist, issue one */
        /* Good status, issue PLOGI to NameServer */
        ndlp = lpfc_findnode_did(vport, NameServer_DID);
        if (ndlp && NLP_CHK_NODE_ACT(ndlp))
            /* Wait for NameServer login cmpl before we can
               continue */
            return 1;

        /* Nameserver login in question.  Revalidate. */
        if (ndlp) {
            ndlp = lpfc_enable_node(vport, ndlp,
                                    NLP_STE_PLOGI_ISSUE);
@@ -6182,12 +6172,11 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
            }
            ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
        } else {
            ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
            ndlp = lpfc_nlp_init(vport, NameServer_DID);
            if (!ndlp) {
                lpfc_els_flush_rscn(vport);
                return 0;
            }
            lpfc_nlp_init(vport, ndlp, NameServer_DID);
            ndlp->nlp_prev_state = ndlp->nlp_state;
            lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
        }
@@ -7746,11 +7735,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
    ndlp = lpfc_findnode_did(vport, did);
    if (!ndlp) {
        /* Cannot find existing Fabric ndlp, so allocate a new one */
        ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
        ndlp = lpfc_nlp_init(vport, did);
        if (!ndlp)
            goto dropit;

        lpfc_nlp_init(vport, ndlp, did);
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
        newnode = 1;
        if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
@@ -8193,7 +8180,6 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
static void
lpfc_start_fdmi(struct lpfc_vport *vport)
{
    struct lpfc_hba *phba = vport->phba;
    struct lpfc_nodelist *ndlp;

    /* If this is the first time, allocate an ndlp and initialize
@@ -8202,9 +8188,8 @@ lpfc_start_fdmi(struct lpfc_vport *vport)
     */
    ndlp = lpfc_findnode_did(vport, FDMI_DID);
    if (!ndlp) {
        ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
        ndlp = lpfc_nlp_init(vport, FDMI_DID);
        if (ndlp) {
            lpfc_nlp_init(vport, ndlp, FDMI_DID);
            ndlp->nlp_type |= NLP_FABRIC;
        } else {
            return;
@@ -8257,7 +8242,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)

    ndlp = lpfc_findnode_did(vport, NameServer_DID);
    if (!ndlp) {
        ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
        ndlp = lpfc_nlp_init(vport, NameServer_DID);
        if (!ndlp) {
            if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                lpfc_disc_start(vport);
@@ -8268,7 +8253,6 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
                            "0251 NameServer login: no memory\n");
            return;
        }
        lpfc_nlp_init(vport, ndlp, NameServer_DID);
    } else if (!NLP_CHK_NODE_ACT(ndlp)) {
        ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
        if (!ndlp) {
@@ -8771,7 +8755,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
    pcmd += sizeof(uint32_t); /* Node Name */
    pcmd += sizeof(uint32_t); /* Node Name */
    memcpy(pcmd, &vport->fc_nodename, 8);

    memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
    lpfc_set_disctmo(vport);

    phba->fc_stat.elsXmitFDISC++;
@@ -3002,6 +3002,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
    MAILBOX_t *mb = &pmb->u.mb;
    struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
    struct lpfc_vport *vport = pmb->vport;
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
    struct serv_parm *sp = &vport->fc_sparam;
    uint32_t ed_tov;

@@ -3031,6 +3032,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
    }

    lpfc_update_vport_wwn(vport);
    fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
    if (vport->port_type == LPFC_PHYSICAL_PORT) {
        memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
        memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
@@ -3309,6 +3311,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
    struct lpfc_sli_ring *pring;
    MAILBOX_t *mb = &pmb->u.mb;
    struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
    uint8_t attn_type;

    /* Unblock ELS traffic */
    pring = lpfc_phba_elsring(phba);
@@ -3325,6 +3328,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
    }

    la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
    attn_type = bf_get(lpfc_mbx_read_top_att_type, la);

    memcpy(&phba->alpa_map[0], mp->virt, 128);

@@ -3337,7 +3341,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)

    if (phba->fc_eventTag <= la->eventTag) {
        phba->fc_stat.LinkMultiEvent++;
        if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
        if (attn_type == LPFC_ATT_LINK_UP)
            if (phba->fc_eventTag != 0)
                lpfc_linkdown(phba);
    }
@@ -3353,7 +3357,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
    }

    phba->link_events++;
    if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) &&
    if ((attn_type == LPFC_ATT_LINK_UP) &&
        !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
        phba->fc_stat.LinkUp++;
        if (phba->link_flag & LS_LOOPBACK_MODE) {
@@ -3379,8 +3383,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                    phba->wait_4_mlo_maint_flg);
        }
        lpfc_mbx_process_link_up(phba, la);
    } else if (bf_get(lpfc_mbx_read_top_att_type, la) ==
               LPFC_ATT_LINK_DOWN) {
    } else if (attn_type == LPFC_ATT_LINK_DOWN ||
               attn_type == LPFC_ATT_UNEXP_WWPN) {
        phba->fc_stat.LinkDown++;
        if (phba->link_flag & LS_LOOPBACK_MODE)
            lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -3389,6 +3393,14 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                            "Data: x%x x%x x%x\n",
                            la->eventTag, phba->fc_eventTag,
                            phba->pport->port_state, vport->fc_flag);
        else if (attn_type == LPFC_ATT_UNEXP_WWPN)
            lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                            "1313 Link Down UNEXP WWPN Event x%x received "
                            "Data: x%x x%x x%x x%x x%x\n",
                            la->eventTag, phba->fc_eventTag,
                            phba->pport->port_state, vport->fc_flag,
                            bf_get(lpfc_mbx_read_top_mm, la),
                            bf_get(lpfc_mbx_read_top_fa, la));
        else
            lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                            "1305 Link Down Event x%x received "
@@ -3399,8 +3411,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                            bf_get(lpfc_mbx_read_top_fa, la));
        lpfc_mbx_issue_link_down(phba);
    }
    if ((phba->sli.sli_flag & LPFC_MENLO_MAINT) &&
        ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP))) {
    if (phba->sli.sli_flag & LPFC_MENLO_MAINT &&
        attn_type == LPFC_ATT_LINK_UP) {
        if (phba->link_state != LPFC_LINK_DOWN) {
            phba->fc_stat.LinkDown++;
            lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -4136,7 +4148,6 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                       int old_state, int new_state)
{
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
    struct lpfc_hba *phba = vport->phba;

    if (new_state == NLP_STE_UNMAPPED_NODE) {
        ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
@@ -4155,14 +4166,14 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
            lpfc_unregister_remote_port(ndlp);
        }

        /* Notify the NVME transport of this rport's loss */
        if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
             (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) &&
            (vport->phba->nvmet_support == 0) &&
            ((ndlp->nlp_fc4_type & NLP_FC4_NVME) ||
             (ndlp->nlp_DID == Fabric_DID))) {
        /* Notify the NVME transport of this rport's loss on the
         * Initiator.  For NVME Target, should upcall transport
         * in the else clause when API available.
         */
        if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
            vport->phba->nport_event_cnt++;
            lpfc_nvme_unregister_port(vport, ndlp);
            if (vport->phba->nvmet_support == 0)
                lpfc_nvme_unregister_port(vport, ndlp);
        }
    }

@@ -4368,10 +4379,17 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
    uint32_t did;
    unsigned long flags;
    unsigned long *active_rrqs_xri_bitmap = NULL;
    int rpi = LPFC_RPI_ALLOC_ERROR;

    if (!ndlp)
        return NULL;

    if (phba->sli_rev == LPFC_SLI_REV4) {
        rpi = lpfc_sli4_alloc_rpi(vport->phba);
        if (rpi == LPFC_RPI_ALLOC_ERROR)
            return NULL;
    }

    spin_lock_irqsave(&phba->ndlp_lock, flags);
    /* The ndlp should not be in memory free mode */
    if (NLP_CHK_FREE_REQ(ndlp)) {
@@ -4381,7 +4399,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                         "usgmap:x%x refcnt:%d\n",
                         (void *)ndlp, ndlp->nlp_usg_map,
                         kref_read(&ndlp->kref));
        return NULL;
        goto free_rpi;
    }
    /* The ndlp should not already be in active mode */
    if (NLP_CHK_NODE_ACT(ndlp)) {
@@ -4391,7 +4409,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                         "usgmap:x%x refcnt:%d\n",
                         (void *)ndlp, ndlp->nlp_usg_map,
                         kref_read(&ndlp->kref));
        return NULL;
        goto free_rpi;
    }

    /* Keep the original DID */
@@ -4409,7 +4427,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,

    spin_unlock_irqrestore(&phba->ndlp_lock, flags);
    if (vport->phba->sli_rev == LPFC_SLI_REV4) {
        ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
        ndlp->nlp_rpi = rpi;
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
                         "0008 rpi:%x DID:%x flg:%x refcnt:%d "
                         "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
@@ -4426,6 +4444,11 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                      "node enable:       did:x%x",
                      ndlp->nlp_DID, 0, 0);
    return ndlp;

free_rpi:
    if (phba->sli_rev == LPFC_SLI_REV4)
        lpfc_sli4_free_rpi(vport->phba, rpi);
    return NULL;
}

void
@@ -5104,65 +5127,82 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)

    ndlp = lpfc_findnode_did(vport, did);
    if (!ndlp) {
        if (vport->phba->nvmet_support)
            return NULL;
        if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
            lpfc_rscn_payload_check(vport, did) == 0)
            return NULL;
        ndlp = (struct lpfc_nodelist *)
            mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
        ndlp = lpfc_nlp_init(vport, did);
        if (!ndlp)
            return NULL;
        lpfc_nlp_init(vport, ndlp, did);
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
        if (vport->phba->nvmet_support)
            return ndlp;
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_NPR_2B_DISC;
        spin_unlock_irq(shost->host_lock);
        return ndlp;
    } else if (!NLP_CHK_NODE_ACT(ndlp)) {
        if (vport->phba->nvmet_support)
            return NULL;
        ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
        if (!ndlp)
            return NULL;
        if (vport->phba->nvmet_support)
            return ndlp;
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_NPR_2B_DISC;
        spin_unlock_irq(shost->host_lock);
        return ndlp;
    }

    /* The NVME Target does not want to actively manage an rport.
     * The goal is to allow the target to reset its state and clear
     * pending IO in preparation for the initiator to recover.
     */
    if ((vport->fc_flag & FC_RSCN_MODE) &&
        !(vport->fc_flag & FC_NDISC_ACTIVE)) {
        if (lpfc_rscn_payload_check(vport, did)) {

            /* Since this node is marked for discovery,
             * delay timeout is not needed.
             */
            lpfc_cancel_retry_delay_tmo(vport, ndlp);

            /* NVME Target mode waits until rport is known to be
             * impacted by the RSCN before it transitions.  No
             * active management - just go to NPR provided the
             * node had a valid login.
             */
            if (vport->phba->nvmet_support)
                return ndlp;

            /* If we've already received a PLOGI from this NPort
             * we don't need to try to discover it again.
             */
            if (ndlp->nlp_flag & NLP_RCV_PLOGI)
                return NULL;

            /* Since this node is marked for discovery,
             * delay timeout is not needed.
             */
            lpfc_cancel_retry_delay_tmo(vport, ndlp);
            if (vport->phba->nvmet_support)
                return ndlp;
            spin_lock_irq(shost->host_lock);
            ndlp->nlp_flag |= NLP_NPR_2B_DISC;
            spin_unlock_irq(shost->host_lock);
        } else
            ndlp = NULL;
    } else {
        /* If we've already received a PLOGI from this NPort,
         * or we are already in the process of discovery on it,
         * we don't need to try to discover it again.
        /* If the initiator received a PLOGI from this NPort or if the
         * initiator is already in the process of discovery on it,
         * there's no need to try to discover it again.
         */
        if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
            ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
            ndlp->nlp_flag & NLP_RCV_PLOGI)
            (!vport->phba->nvmet_support &&
             ndlp->nlp_flag & NLP_RCV_PLOGI))
            return NULL;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

        if (vport->phba->nvmet_support)
            return ndlp;

        /* Moving to NPR state clears unsolicited flags and
         * allows for rediscovery
         */
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_NPR_2B_DISC;
        spin_unlock_irq(shost->host_lock);
@@ -5887,16 +5927,31 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
    return NULL;
}

void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
              uint32_t did)
struct lpfc_nodelist *
lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
{
    struct lpfc_nodelist *ndlp;
    int rpi = LPFC_RPI_ALLOC_ERROR;

    if (vport->phba->sli_rev == LPFC_SLI_REV4) {
        rpi = lpfc_sli4_alloc_rpi(vport->phba);
        if (rpi == LPFC_RPI_ALLOC_ERROR)
            return NULL;
    }

    ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
    if (!ndlp) {
        if (vport->phba->sli_rev == LPFC_SLI_REV4)
            lpfc_sli4_free_rpi(vport->phba, rpi);
        return NULL;
    }

    memset(ndlp, 0, sizeof (struct lpfc_nodelist));

    lpfc_initialize_node(vport, ndlp, did);
    INIT_LIST_HEAD(&ndlp->nlp_listp);
    if (vport->phba->sli_rev == LPFC_SLI_REV4) {
        ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
        ndlp->nlp_rpi = rpi;
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
                         "0007 rpi:%x DID:%x flg:%x refcnt:%d "
                         "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
@@ -5918,7 +5973,7 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                  "node init:       did:x%x",
                  ndlp->nlp_DID, 0, 0);

    return;
    return ndlp;
}

/* This routine releases all resources associated with a specifc NPort's ndlp
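The reworked lpfc_nlp_init() above changes the resource ordering: the hardware RPI is allocated before the node memory, and each later failure path unwinds the RPI so neither resource leaks. A compact stand-alone sketch of the same acquire-then-rollback pattern (rpi_alloc/rpi_free and struct node are hypothetical stand-ins, not driver symbols):

#include <stdlib.h>

static int  rpi_alloc(void)   { return 42; }   /* stand-in allocator */
static void rpi_free(int rpi) { (void)rpi; }   /* stand-in release */

struct node { unsigned did; int rpi; };

static struct node *node_create(unsigned did)
{
    int rpi = rpi_alloc();                 /* resource 1 first */
    if (rpi < 0)
        return NULL;

    struct node *n = malloc(sizeof(*n));   /* resource 2 */
    if (!n) {
        rpi_free(rpi);                     /* unwind resource 1 */
        return NULL;
    }

    n->did = did;
    n->rpi = rpi;
    return n;
}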
@@ -509,6 +509,8 @@ struct class_parms {
    uint8_t word3Reserved2; /* Fc Word 3, bit  0: 7 */
};

#define FAPWWN_KEY_VENDOR 0x42524344 /*valid vendor version fawwpn key*/

struct serv_parm { /* Structure is in Big Endian format */
    struct csp cmn;
    struct lpfc_name portName;
@@ -2885,6 +2887,7 @@ struct lpfc_mbx_read_top {
#define LPFC_ATT_RESERVED    0x00 /* Reserved - attType */
#define LPFC_ATT_LINK_UP     0x01 /* Link is up */
#define LPFC_ATT_LINK_DOWN   0x02 /* Link is down */
#define LPFC_ATT_UNEXP_WWPN  0x06 /* Link is down Unexpected WWWPN */
    uint32_t word3;
#define lpfc_mbx_read_top_alpa_granted_SHIFT 24
#define lpfc_mbx_read_top_alpa_granted_MASK  0x000000FF
@@ -2720,6 +2720,9 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7
#define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_ifip_WORD word2
#define lpfc_mbx_rq_ftr_rq_iaar_SHIFT 9
#define lpfc_mbx_rq_ftr_rq_iaar_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_iaar_WORD word2
#define lpfc_mbx_rq_ftr_rq_perfh_SHIFT 11
#define lpfc_mbx_rq_ftr_rq_perfh_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_perfh_WORD word2
@@ -3853,6 +3856,7 @@ struct lpfc_acqe_fc_la {
#define LPFC_FC_LA_TYPE_NO_HARD_ALPA 0x3
#define LPFC_FC_LA_TYPE_MDS_LINK_DOWN 0x4
#define LPFC_FC_LA_TYPE_MDS_LOOPBACK 0x5
#define LPFC_FC_LA_TYPE_UNEXP_WWPN 0x6
#define lpfc_acqe_fc_la_port_type_SHIFT 6
#define lpfc_acqe_fc_la_port_type_MASK 0x00000003
#define lpfc_acqe_fc_la_port_type_WORD word0
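The *_SHIFT/*_MASK/*_WORD triplets above feed lpfc's bf_get()/bf_set() accessor macros: the triplet names the bit offset, the field-width mask, and which 32-bit word of the structure holds the field. A simplified, host-order-only sketch of how such a macro consumes the triplet (my_bf_get is an illustrative name; the real macros live in lpfc_hw4.h and expand along similar lines):

#include <stdint.h>

struct acqe_fc { uint32_t word0; };

#define lpfc_acqe_fc_la_port_type_SHIFT 6
#define lpfc_acqe_fc_la_port_type_MASK  0x00000003
#define lpfc_acqe_fc_la_port_type_WORD  word0

/* token-paste the field name to find its word, shift, and mask */
#define my_bf_get(name, ptr) \
    ((((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)

static unsigned port_type(const struct acqe_fc *e)
{
    return my_bf_get(lpfc_acqe_fc_la_port_type, e);
}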
@@ -42,6 +42,10 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
@@ -52,6 +56,7 @@
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
@@ -335,6 +340,9 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
    uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
    u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

    /* If the soft name exists then update it using the service params */
    if (vport->phba->cfg_soft_wwnn)
        u64_to_wwn(vport->phba->cfg_soft_wwnn,
@@ -354,9 +362,25 @@ lpfc_update_vport_wwn(struct lpfc_vport *vport)
        memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
               sizeof(struct lpfc_name));

    if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
    /*
     * If the port name has changed, then set the Param changes flag
     * to unreg the login
     */
    if (vport->fc_portname.u.wwn[0] != 0 &&
        memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
               sizeof(struct lpfc_name)))
        vport->vport_flag |= FAWWPN_PARAM_CHG;

    if (vport->fc_portname.u.wwn[0] == 0 ||
        vport->phba->cfg_soft_wwpn ||
        (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
        vport->vport_flag & FAWWPN_SET) {
        memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
               sizeof(struct lpfc_name));
        vport->vport_flag &= ~FAWWPN_SET;
        if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
            vport->vport_flag |= FAWWPN_SET;
    }
    else
        memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
               sizeof(struct lpfc_name));
@@ -1003,8 +1027,10 @@ static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
    struct lpfc_scsi_buf *psb, *psb_next;
    struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
    LIST_HEAD(aborts);
    LIST_HEAD(nvme_aborts);
    LIST_HEAD(nvmet_aborts);
    unsigned long iflag = 0;
    struct lpfc_sglq *sglq_entry = NULL;

@@ -1027,16 +1053,10 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
    list_for_each_entry(sglq_entry,
                        &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
        sglq_entry->state = SGL_FREED;
    list_for_each_entry(sglq_entry,
                        &phba->sli4_hba.lpfc_abts_nvmet_sgl_list, list)
        sglq_entry->state = SGL_FREED;

    list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
                     &phba->sli4_hba.lpfc_els_sgl_list);

    if (phba->sli4_hba.nvme_wq)
        list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list,
                         &phba->sli4_hba.lpfc_nvmet_sgl_list);

    spin_unlock(&phba->sli4_hba.sgl_list_lock);
    /* abts_scsi_buf_list_lock required because worker thread uses this
@@ -1053,6 +1073,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
        spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
        list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list,
                         &nvme_aborts);
        list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
                         &nvmet_aborts);
        spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
    }

@@ -1066,13 +1088,20 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
    list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
    spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);

    list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
        psb->pCmd = NULL;
        psb->status = IOSTAT_SUCCESS;
    if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
        list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
            psb->pCmd = NULL;
            psb->status = IOSTAT_SUCCESS;
        }
        spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
        list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
        spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);

        list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
            ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
            lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
        }
    }
    spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
    list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
    spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);

    lpfc_sli4_free_sp_events(phba);
    return 0;
@@ -2874,34 +2903,38 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
{
    struct lpfc_nodelist *ndlp, *next_ndlp;
    struct lpfc_vport **vports;
    int i;
    int i, rpi;
    unsigned long flags;

    if (phba->sli_rev != LPFC_SLI_REV4)
        return;

    vports = lpfc_create_vport_work_array(phba);
    if (vports != NULL) {
        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
            if (vports[i]->load_flag & FC_UNLOADING)
                continue;
    if (vports == NULL)
        return;

            list_for_each_entry_safe(ndlp, next_ndlp,
                                     &vports[i]->fc_nodes,
                                     nlp_listp) {
                if (NLP_CHK_NODE_ACT(ndlp)) {
                    ndlp->nlp_rpi =
                        lpfc_sli4_alloc_rpi(phba);
                    lpfc_printf_vlog(ndlp->vport, KERN_INFO,
                                     LOG_NODE,
                                     "0009 rpi:%x DID:%x "
                                     "flg:%x map:%x %p\n",
                                     ndlp->nlp_rpi,
                                     ndlp->nlp_DID,
                                     ndlp->nlp_flag,
                                     ndlp->nlp_usg_map,
                                     ndlp);
                }
    for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
        if (vports[i]->load_flag & FC_UNLOADING)
            continue;

        list_for_each_entry_safe(ndlp, next_ndlp,
                                 &vports[i]->fc_nodes,
                                 nlp_listp) {
            if (!NLP_CHK_NODE_ACT(ndlp))
                continue;
            rpi = lpfc_sli4_alloc_rpi(phba);
            if (rpi == LPFC_RPI_ALLOC_ERROR) {
                spin_lock_irqsave(&phba->ndlp_lock, flags);
                NLP_CLR_NODE_ACT(ndlp);
                spin_unlock_irqrestore(&phba->ndlp_lock, flags);
                continue;
            }
            ndlp->nlp_rpi = rpi;
            lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
                             "0009 rpi:%x DID:%x "
                             "flg:%x map:%x %p\n", ndlp->nlp_rpi,
                             ndlp->nlp_DID, ndlp->nlp_flag,
                             ndlp->nlp_usg_map, ndlp);
        }
    }
    lpfc_destroy_vport_work_array(phba, vports);
@@ -3508,6 +3541,12 @@ lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
    spin_unlock(&phba->scsi_buf_list_put_lock);
    spin_unlock_irq(&phba->scsi_buf_list_get_lock);

    lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                    "6060 Current allocated SCSI xri-sgl count:%d, "
                    "maximum SCSI xri count:%d (split:%d)\n",
                    phba->sli4_hba.scsi_xri_cnt,
                    phba->sli4_hba.scsi_xri_max, phba->cfg_xri_split);

    if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
        /* max scsi xri shrinked below the allocated scsi buffers */
        scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
@@ -4508,9 +4547,15 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
    /* Parse and translate link attention fields */
    la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
    la->eventTag = acqe_fc->event_tag;
    bf_set(lpfc_mbx_read_top_att_type, la,
           LPFC_FC_LA_TYPE_LINK_DOWN);

    if (phba->sli4_hba.link_state.status ==
        LPFC_FC_LA_TYPE_UNEXP_WWPN) {
        bf_set(lpfc_mbx_read_top_att_type, la,
               LPFC_FC_LA_TYPE_UNEXP_WWPN);
    } else {
        bf_set(lpfc_mbx_read_top_att_type, la,
               LPFC_FC_LA_TYPE_LINK_DOWN);
    }
    /* Invoke the mailbox command callback function */
    lpfc_mbx_cmpl_read_topology(phba, pmb);

@@ -4716,10 +4761,9 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
    ndlp = lpfc_findnode_did(vport, Fabric_DID);
    if (!ndlp) {
        /* Cannot find existing Fabric ndlp, so allocate a new one */
        ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
        ndlp = lpfc_nlp_init(vport, Fabric_DID);
        if (!ndlp)
            return 0;
        lpfc_nlp_init(vport, ndlp, Fabric_DID);
        /* Set the node type */
        ndlp->nlp_type |= NLP_FABRIC;
        /* Put ndlp onto node list */
@@ -5778,6 +5822,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        /* Initialize the Abort nvme buffer list used by driver */
        spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
        /* Fast-path XRI aborted CQ Event work queue list */
        INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
    }
@@ -5809,6 +5854,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
    INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
    INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);

    /* Initialize mboxq lists. If the early init routines fail
     * these lists need to be correctly initialized.
     */
    INIT_LIST_HEAD(&phba->sli.mboxq);
    INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);

    /* initialize optic_state to 0xFF */
    phba->sli4_hba.lnk_info.optic_state = 0xff;

@@ -5874,6 +5925,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                        "READ_NV, mbxStatus x%x\n",
                        bf_get(lpfc_mqe_command, &mboxq->u.mqe),
                        bf_get(lpfc_mqe_status, &mboxq->u.mqe));
        mempool_free(mboxq, phba->mbox_mem_pool);
        rc = -EIO;
        goto out_free_bsmbx;
    }
@@ -6398,7 +6450,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
    INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
    INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
    INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
    INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list);
    INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);

    /* els xri-sgl book keeping */
    phba->sli4_hba.els_xri_cnt = 0;
@@ -7799,7 +7851,7 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)

    /* Create Fast Path FCP WQs */
    wqesize = (phba->fcp_embed_io) ?
        LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
        LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
    qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount);
    if (!qdesc) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -7830,7 +7882,7 @@ int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
    struct lpfc_queue *qdesc;
    int idx, io_channel, max;
    int idx, io_channel;

    /*
     * Create HBA Record arrays.
@@ -7991,15 +8043,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
        if (lpfc_alloc_nvme_wq_cq(phba, idx))
            goto out_error;

    /* allocate MRQ CQs */
    max = phba->cfg_nvme_io_channel;
    if (max < phba->cfg_nvmet_mrq)
        max = phba->cfg_nvmet_mrq;

    for (idx = 0; idx < max; idx++)
        if (lpfc_alloc_nvme_wq_cq(phba, idx))
            goto out_error;

    if (phba->nvmet_support) {
        for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
            qdesc = lpfc_sli4_queue_alloc(phba,
@@ -8221,11 +8264,11 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)

    /* Release FCP cqs */
    lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq,
                             phba->cfg_fcp_io_channel);
                             phba->cfg_fcp_io_channel);

    /* Release FCP wqs */
    lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq,
                             phba->cfg_fcp_io_channel);
                             phba->cfg_fcp_io_channel);

    /* Release FCP CQ mapping array */
    lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map);
@@ -8571,15 +8614,15 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0528 %s not allocated\n",
                        phba->sli4_hba.mbx_cq ?
                        "Mailbox WQ" : "Mailbox CQ");
                        "Mailbox WQ" : "Mailbox CQ");
        rc = -ENOMEM;
        goto out_destroy;
    }

    rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
                           phba->sli4_hba.mbx_cq,
                           phba->sli4_hba.mbx_wq,
                           NULL, 0, LPFC_MBOX);
                           phba->sli4_hba.mbx_cq,
                           phba->sli4_hba.mbx_wq,
                           NULL, 0, LPFC_MBOX);
    if (rc) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
@@ -9934,17 +9977,19 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
{
    int wait_time = 0;
    int nvme_xri_cmpl = 1;
    int nvmet_xri_cmpl = 1;
    int fcp_xri_cmpl = 1;
    int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
|
||||
int nvmet_xri_cmpl =
|
||||
list_empty(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list);
|
||||
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
|
||||
fcp_xri_cmpl =
|
||||
list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
|
||||
nvme_xri_cmpl =
|
||||
list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
|
||||
nvmet_xri_cmpl =
|
||||
list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
|
||||
}
|
||||
|
||||
while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl ||
|
||||
!nvmet_xri_cmpl) {
|
||||
@ -9970,9 +10015,12 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
|
||||
msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
|
||||
wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
|
||||
}
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
|
||||
nvme_xri_cmpl = list_empty(
|
||||
&phba->sli4_hba.lpfc_abts_nvme_buf_list);
|
||||
nvmet_xri_cmpl = list_empty(
|
||||
&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
|
||||
}
|
||||
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
|
||||
fcp_xri_cmpl = list_empty(
|
||||
@ -9981,8 +10029,6 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
|
||||
els_xri_cmpl =
|
||||
list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
|
||||
|
||||
nvmet_xri_cmpl =
|
||||
list_empty(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list);
|
||||
}
|
||||
}
|
||||
|
||||
@ -10048,9 +10094,14 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
|
||||
/* Stop kthread signal shall trigger work_done one more time */
|
||||
kthread_stop(phba->worker_thread);
|
||||
|
||||
/* Unset the queues shared with the hardware then release all
|
||||
* allocated resources.
|
||||
*/
|
||||
lpfc_sli4_queue_unset(phba);
|
||||
lpfc_sli4_queue_destroy(phba);
|
||||
|
||||
/* Reset SLI4 HBA FCoE function */
|
||||
lpfc_pci_function_reset(phba);
|
||||
lpfc_sli4_queue_destroy(phba);
|
||||
|
||||
/* Stop the SLI4 device port */
|
||||
phba->pport->work_port_events = 0;
|
||||
@ -10306,6 +10357,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
|
||||
}
|
||||
|
||||
/* Initialize and populate the iocb list per host */
|
||||
|
||||
error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
|
||||
if (error) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
@ -11051,7 +11103,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
|
||||
struct lpfc_hba *phba;
|
||||
struct lpfc_vport *vport = NULL;
|
||||
struct Scsi_Host *shost = NULL;
|
||||
int error;
|
||||
int error, cnt;
|
||||
uint32_t cfg_mode, intr_mode;
|
||||
|
||||
/* Allocate memory for HBA structure */
|
||||
@ -11085,12 +11137,15 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
|
||||
goto out_unset_pci_mem_s4;
|
||||
}
|
||||
|
||||
/* Initialize and populate the iocb list per host */
|
||||
cnt = phba->cfg_iocb_cnt * 1024;
|
||||
if (phba->nvmet_support)
|
||||
cnt += phba->cfg_nvmet_mrq_post * phba->cfg_nvmet_mrq;
|
||||
|
||||
/* Initialize and populate the iocb list per host */
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
||||
"2821 initialize iocb list %d.\n",
|
||||
phba->cfg_iocb_cnt*1024);
|
||||
error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
|
||||
"2821 initialize iocb list %d total %d\n",
|
||||
phba->cfg_iocb_cnt, cnt);
|
||||
error = lpfc_init_iocb_list(phba, cnt);
|
||||
|
||||
if (error) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
@ -11177,7 +11232,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
|
||||
if ((phba->nvmet_support == 0) &&
|
||||
(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
|
||||
/* Create NVME binding with nvme_fc_transport. This
|
||||
* ensures the vport is initialized.
|
||||
* ensures the vport is initialized. If the localport
|
||||
* create fails, it should not unload the driver to
|
||||
* support field issues.
|
||||
*/
|
||||
error = lpfc_nvme_create_localport(vport);
|
||||
if (error) {
|
||||
@ -11185,7 +11242,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
|
||||
"6004 NVME registration failed, "
|
||||
"error x%x\n",
|
||||
error);
|
||||
goto out_disable_intr;
|
||||
}
|
||||
}
|
||||
|
||||
@ -11984,6 +12040,7 @@ int
|
||||
lpfc_fof_queue_create(struct lpfc_hba *phba)
|
||||
{
|
||||
struct lpfc_queue *qdesc;
|
||||
uint32_t wqesize;
|
||||
|
||||
/* Create FOF EQ */
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
|
||||
@ -12004,8 +12061,11 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
|
||||
phba->sli4_hba.oas_cq = qdesc;
|
||||
|
||||
/* Create OAS WQ */
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
|
||||
wqesize = (phba->fcp_embed_io) ?
|
||||
LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
|
||||
phba->sli4_hba.wq_ecount);
|
||||
|
||||
if (!qdesc)
|
||||
goto out_error;
|
||||
|
||||
|
@ -2083,9 +2083,12 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
if (phba->max_vpi && phba->cfg_enable_npiv)
bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);

if (phba->nvmet_support)
if (phba->nvmet_support) {
bf_set(lpfc_mbx_rq_ftr_rq_mrqp, &mboxq->u.mqe.un.req_ftrs, 1);

/* iaab/iaar NOT set for now */
bf_set(lpfc_mbx_rq_ftr_rq_iaab, &mboxq->u.mqe.un.req_ftrs, 0);
bf_set(lpfc_mbx_rq_ftr_rq_iaar, &mboxq->u.mqe.un.req_ftrs, 0);
}
return;
}

@ -361,8 +361,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
case NLP_STE_PRLI_ISSUE:
case NLP_STE_UNMAPPED_NODE:
case NLP_STE_MAPPED_NODE:
/* lpfc_plogi_confirm_nport skips fabric did, handle it here */
if (!(ndlp->nlp_type & NLP_FABRIC)) {
/* For initiators, lpfc_plogi_confirm_nport skips fabric did.
* For target mode, execute implicit logo.
* Fabric nodes go into NPR.
*/
if (!(ndlp->nlp_type & NLP_FABRIC) &&
!(phba->nvmet_support)) {
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
ndlp, NULL);
return 1;

@ -401,6 +401,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nodelist *ndlp;
struct ulp_bde64 *bpl;
struct lpfc_dmabuf *bmp;
uint16_t ntype, nstate;

/* there are two dma buf in the request, actually there is one and
* the second one is just the start address + cmd size.
@ -417,11 +418,26 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
vport = lport->vport;

ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
if (!ndlp) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
"6043 Could not find node for DID %x\n",
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
"6051 DID x%06x not an active rport.\n",
pnvme_rport->port_id);
return 1;
return -ENODEV;
}

/* The remote node has to be a mapped nvme target or an
* unmapped nvme initiator or it's an error.
*/
ntype = ndlp->nlp_type;
nstate = ndlp->nlp_state;
if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
(ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
"6088 DID x%06x not ready for "
"IO. State x%x, Type x%x\n",
pnvme_rport->port_id,
ndlp->nlp_state, ndlp->nlp_type);
return -ENODEV;
}
bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!bmp) {
@ -456,7 +472,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,

/* Expand print to include key fields. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6051 ENTER. lport %p, rport %p lsreq%p rqstlen:%d "
"6149 ENTER. lport %p, rport %p lsreq%p rqstlen:%d "
"rsplen:%d %pad %pad\n",
pnvme_lport, pnvme_rport,
pnvme_lsreq, pnvme_lsreq->rqstlen,
@ -745,6 +761,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
struct nvme_fc_cmd_iu *cp;
struct lpfc_nvme_rport *rport;
struct lpfc_nodelist *ndlp;
struct lpfc_nvme_fcpreq_priv *freqpriv;
unsigned long flags;
uint32_t code;
uint16_t cid, sqhd, data;
@ -772,9 +789,8 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
ndlp = rport->ndlp;
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
"6061 rport %p, ndlp %p, DID x%06x ndlp "
"not ready.\n",
rport, ndlp, rport->remoteport->port_id);
"6061 rport %p, DID x%06x node not ready.\n",
rport, rport->remoteport->port_id);

ndlp = lpfc_findnode_did(vport, rport->remoteport->port_id);
if (!ndlp) {
@ -853,15 +869,18 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
break;
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
"6081 NVME Completion Protocol Error: "
"status x%x result x%x placed x%x\n",
"xri %x status x%x result x%x "
"placed x%x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_ncmd->status, lpfc_ncmd->result,
wcqe->total_data_placed);
break;
default:
out_err:
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
"6072 NVME Completion Error: "
"6072 NVME Completion Error: xri %x "
"status x%x result x%x placed x%x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_ncmd->status, lpfc_ncmd->result,
wcqe->total_data_placed);
nCmd->transferred_length = 0;
@ -900,6 +919,8 @@ out_err:
phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++;
}
#endif
freqpriv = nCmd->private;
freqpriv->nvme_buf = NULL;
nCmd->done(nCmd);

spin_lock_irqsave(&phba->hbalock, flags);
@ -1099,12 +1120,12 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,

first_data_sgl = sgl;
lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
if (lpfc_ncmd->seg_cnt > phba->cfg_sg_seg_cnt) {
if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6058 Too many sg segments from "
"NVME Transport. Max %d, "
"nvmeIO sg_cnt %d\n",
phba->cfg_sg_seg_cnt,
phba->cfg_nvme_seg_cnt,
lpfc_ncmd->seg_cnt);
lpfc_ncmd->seg_cnt = 0;
return 1;
@ -1196,6 +1217,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_buf *lpfc_ncmd;
struct lpfc_nvme_rport *rport;
struct lpfc_nvme_qhandle *lpfc_queue_info;
struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint64_t start = 0;
#endif
@ -1274,7 +1296,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
* Do not let the IO hang out forever. There is no midlayer issuing
* an abort so inform the FW of the maximum IO pending time.
*/
pnvme_fcreq->private = (void *)lpfc_ncmd;
freqpriv->nvme_buf = lpfc_ncmd;
lpfc_ncmd->nvmeCmd = pnvme_fcreq;
lpfc_ncmd->nrport = rport;
lpfc_ncmd->ndlp = ndlp;
@ -1404,6 +1426,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_buf *lpfc_nbuf;
struct lpfc_iocbq *abts_buf;
struct lpfc_iocbq *nvmereq_wqe;
struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
union lpfc_wqe *abts_wqe;
unsigned long flags;
int ret_val;
@ -1414,7 +1437,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
phba = vport->phba;

/* Announce entry to new IO submit field. */
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
"6002 Abort Request to rport DID x%06x "
"for nvme_fc_req %p\n",
pnvme_rport->port_id,
@ -1444,7 +1467,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* The remote node has to be ready to send an abort. */
if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) &&
!(ndlp->nlp_type & NLP_NVME_TARGET)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_ABTS,
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6048 rport %p, DID x%06x not ready for "
"IO. State x%x, Type x%x\n",
rport, pnvme_rport->port_id,
@ -1459,27 +1482,28 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* driver queued commands are in process of being flushed */
if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6139 Driver in reset cleanup - flushing "
"NVME Req now. hba_flag x%x\n",
phba->hba_flag);
return;
}

lpfc_nbuf = (struct lpfc_nvme_buf *)pnvme_fcreq->private;
lpfc_nbuf = freqpriv->nvme_buf;
if (!lpfc_nbuf) {
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6140 NVME IO req has no matching lpfc nvme "
"io buffer. Skipping abort req.\n");
return;
} else if (!lpfc_nbuf->nvmeCmd) {
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6141 lpfc NVME IO req has no nvme_fcreq "
"io buffer. Skipping abort req.\n");
return;
}
nvmereq_wqe = &lpfc_nbuf->cur_iocbq;

/*
* The lpfc_nbuf and the mapped nvme_fcreq in the driver's
@ -1490,23 +1514,22 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
*/
if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6143 NVME req mismatch: "
"lpfc_nbuf %p nvmeCmd %p, "
"pnvme_fcreq %p. Skipping Abort\n",
"pnvme_fcreq %p. Skipping Abort xri x%x\n",
lpfc_nbuf, lpfc_nbuf->nvmeCmd,
pnvme_fcreq);
pnvme_fcreq, nvmereq_wqe->sli4_xritag);
return;
}

/* Don't abort IOs no longer on the pending queue. */
nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6142 NVME IO req %p not queued - skipping "
"abort req\n",
pnvme_fcreq);
"abort req xri x%x\n",
pnvme_fcreq, nvmereq_wqe->sli4_xritag);
return;
}

@ -1517,21 +1540,22 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* Outstanding abort is in progress */
if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6144 Outstanding NVME I/O Abort Request "
"still pending on nvme_fcreq %p, "
"lpfc_ncmd %p\n",
pnvme_fcreq, lpfc_nbuf);
"lpfc_ncmd %p xri x%x\n",
pnvme_fcreq, lpfc_nbuf,
nvmereq_wqe->sli4_xritag);
return;
}

abts_buf = __lpfc_sli_get_iocbq(phba);
if (!abts_buf) {
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6136 No available abort wqes. Skipping "
"Abts req for nvme_fcreq %p.\n",
pnvme_fcreq);
"Abts req for nvme_fcreq %p xri x%x\n",
pnvme_fcreq, nvmereq_wqe->sli4_xritag);
return;
}

@ -1580,7 +1604,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (ret_val == IOCB_ERROR) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6137 Failed abts issue_wqe with status x%x "
"for nvme_fcreq %p.\n",
ret_val, pnvme_fcreq);
@ -1588,8 +1612,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
return;
}

lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
"6138 Transport Abort NVME Request Issued for\n"
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
"6138 Transport Abort NVME Request Issued for "
"ox_id x%x on reqtag x%x\n",
nvmereq_wqe->sli4_xritag,
abts_buf->iotag);
@ -1618,7 +1642,7 @@ static struct nvme_fc_port_template lpfc_nvme_template = {
.local_priv_sz = sizeof(struct lpfc_nvme_lport),
.remote_priv_sz = sizeof(struct lpfc_nvme_rport),
.lsrqst_priv_sz = 0,
.fcprqst_priv_sz = 0,
.fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
};

/**
@ -2049,7 +2073,7 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
if (lpfc_test_rrq_active(phba, ndlp,
lpfc_ncmd->cur_iocbq.sli4_lxritag))
continue;
list_del(&lpfc_ncmd->list);
list_del_init(&lpfc_ncmd->list);
found = 1;
break;
}
@ -2064,7 +2088,7 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
if (lpfc_test_rrq_active(
phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
continue;
list_del(&lpfc_ncmd->list);
list_del_init(&lpfc_ncmd->list);
found = 1;
break;
}
@ -2092,6 +2116,12 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)

lpfc_ncmd->nonsg_phys = 0;
if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6310 XB release deferred for "
"ox_id x%x on reqtag x%x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_ncmd->cur_iocbq.iotag);

spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
iflag);
lpfc_ncmd->nvmeCmd = NULL;
@ -2142,8 +2172,18 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);

/* For now need + 1 to get around NVME transport logic */
lpfc_nvme_template.max_sgl_segments = phba->cfg_sg_seg_cnt + 1;
/* Limit to LPFC_MAX_NVME_SEG_CNT.
* For now need + 1 to get around NVME transport logic.
*/
if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_INIT,
"6300 Reducing sg segment cnt to %d\n",
LPFC_MAX_NVME_SEG_CNT);
phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
} else {
phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
}
lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;

/* localport is allocated from the stack, but the registration
@ -2249,12 +2289,23 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
void
lpfc_nvme_update_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;

localport = vport->localport;
if (!localport) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
"6710 Update NVME fail. No localport\n");
return;
}
lport = (struct lpfc_nvme_lport *)localport->private;

if (!lport) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
"6171 Update NVME fail. localP %p, No lport\n",
localport);
return;
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6012 Update NVME lport %p did x%x\n",
localport, vport->fc_myDID);
@ -2268,7 +2319,7 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport)
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6030 bound lport %p to DID x%06x\n",
lport, localport->port_id);

#endif
}

int
@ -2409,6 +2460,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_rport *rport;
struct nvme_fc_remote_port *remoteport;
unsigned long wait_tmo;

localport = vport->localport;

@ -2451,11 +2503,12 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* before proceeding. This guarantees the transport and driver
* have completed the unreg process.
*/
ret = wait_for_completion_timeout(&rport->rport_unreg_done, 5);
wait_tmo = msecs_to_jiffies(5000);
ret = wait_for_completion_timeout(&rport->rport_unreg_done,
wait_tmo);
if (ret == 0) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
"6169 Unreg nvme wait failed %d\n",
ret);
"6169 Unreg nvme wait timeout\n");
}
}
return;
@ -2463,7 +2516,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
input_err:
#endif
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
"6168: State error: lport %p, rport%p FCID x%06x\n",
"6168 State error: lport %p, rport%p FCID x%06x\n",
vport->localport, ndlp->rport, ndlp->nlp_DID);
}

@ -2494,7 +2547,7 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
&phba->sli4_hba.lpfc_abts_nvme_buf_list,
list) {
if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
list_del(&lpfc_ncmd->list);
list_del_init(&lpfc_ncmd->list);
lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
lpfc_ncmd->status = IOSTAT_SUCCESS;
spin_unlock(
@ -2510,6 +2563,12 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
rxid, 1);
lpfc_sli4_abts_err_handler(phba, ndlp, axri);
}

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6311 XRI Aborted xri x%x tag x%x "
"released\n",
xri, lpfc_ncmd->cur_iocbq.iotag);

lpfc_release_nvme_buf(phba, lpfc_ncmd);
if (rrq_empty)
lpfc_worker_wake_up(phba);
@ -2518,4 +2577,8 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
}
spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6312 XRI Aborted xri x%x not found\n", xri);

}

@ -21,12 +21,7 @@
* included with this package. *
********************************************************************/

#define LPFC_NVME_MIN_SEGS 16
#define LPFC_NVME_DEFAULT_SEGS 66 /* 256K IOs - 64 + 2 */
#define LPFC_NVME_MAX_SEGS 510
#define LPFC_NVMET_MIN_POSTBUF 16
#define LPFC_NVMET_DEFAULT_POSTBUF 1024
#define LPFC_NVMET_MAX_POSTBUF 4096
#define LPFC_NVME_DEFAULT_SEGS (64 + 1) /* 256K IOs */
#define LPFC_NVME_WQSIZE 256

#define LPFC_NVME_ERSP_LEN 0x20
@ -102,3 +97,7 @@ struct lpfc_nvme_buf {
uint64_t ts_data_nvme;
#endif
};

struct lpfc_nvme_fcpreq_priv {
struct lpfc_nvme_buf *nvme_buf;
};

@ -71,6 +71,26 @@ static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
struct lpfc_nvmet_rcv_ctx *,
uint32_t, uint16_t);

void
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
unsigned long iflag;

lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6313 NVMET Defer ctx release xri x%x flg x%x\n",
ctxp->oxid, ctxp->flag);

spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
iflag);
return;
}
ctxp->flag |= LPFC_NVMET_CTX_RLS;
list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
}

/**
* lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
* @phba: Pointer to HBA context object.
@ -139,6 +159,11 @@ lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
struct lpfc_dmabuf *mp)
{
if (ctxp) {
if (ctxp->flag)
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6314 rq_post ctx xri x%x flag x%x\n",
ctxp->oxid, ctxp->flag);

if (ctxp->txrdy) {
pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
ctxp->txrdy_phys);
@ -337,39 +362,55 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
#endif

ctxp = cmdwqe->context2;
ctxp->flag &= ~LPFC_NVMET_IO_INP;

rsp = &ctxp->ctx.fcp_req;
op = rsp->op;
ctxp->flag &= ~LPFC_NVMET_IO_INP;

status = bf_get(lpfc_wcqe_c_status, wcqe);
result = wcqe->parameter;

if (!phba->targetport)
goto out;
if (phba->targetport)
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
else
tgtp = NULL;

lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
ctxp->oxid, op, status);

tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
if (status) {
rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
rsp->transferred_length = 0;
atomic_inc(&tgtp->xmt_fcp_rsp_error);
if (tgtp)
atomic_inc(&tgtp->xmt_fcp_rsp_error);

/* pick up SLI4 exhange busy condition */
if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
ctxp->flag |= LPFC_NVMET_XBUSY;

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6315 IO Cmpl XBUSY: xri x%x: %x/%x\n",
ctxp->oxid, status, result);
} else {
ctxp->flag &= ~LPFC_NVMET_XBUSY;
}

} else {
rsp->fcp_error = NVME_SC_SUCCESS;
if (op == NVMET_FCOP_RSP)
rsp->transferred_length = rsp->rsplen;
else
rsp->transferred_length = rsp->transfer_length;
atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
if (tgtp)
atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
}

out:
if ((op == NVMET_FCOP_READDATA_RSP) ||
(op == NVMET_FCOP_RSP)) {
/* Sanity check */
ctxp->state = LPFC_NVMET_STE_DONE;
ctxp->entry_cnt++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->ktime_on) {
if (rsp->op == NVMET_FCOP_READDATA_RSP) {
@ -517,7 +558,6 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
struct lpfc_hba *phba = ctxp->phba;
struct lpfc_iocbq *nvmewqeq;
unsigned long iflags;
int rc;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@ -543,10 +583,11 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
#endif

/* Sanity check */
if (ctxp->state == LPFC_NVMET_STE_ABORT) {
if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
(ctxp->state == LPFC_NVMET_STE_ABORT)) {
atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6102 Bad state IO x%x aborted\n",
"6102 IO xri x%x aborted\n",
ctxp->oxid);
rc = -ENXIO;
goto aerr;
@ -571,10 +612,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
ctxp->oxid, rsp->op, rsp->rsplen);

/* For now we take hbalock */
spin_lock_irqsave(&phba->hbalock, iflags);
rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (rc == WQE_SUCCESS) {
ctxp->flag |= LPFC_NVMET_IO_INP;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@ -619,16 +657,27 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
struct lpfc_nvmet_rcv_ctx *ctxp =
container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
struct lpfc_hba *phba = ctxp->phba;
unsigned long flags;

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6103 Abort op: oxri x%x %d cnt %d\n",
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
"6103 Abort op: oxri x%x flg x%x cnt %d\n",
ctxp->oxid, ctxp->flag, ctxp->entry_cnt);

lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x state x%x cnt x%x\n",
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
lpfc_nvmeio_data(phba, "NVMET FCP ABRT: "
"xri x%x flg x%x cnt x%x\n",
ctxp->oxid, ctxp->flag, ctxp->entry_cnt);

atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
ctxp->entry_cnt++;
spin_lock_irqsave(&ctxp->ctxlock, flags);

/* Since iaab/iaar are NOT set, we need to check
* if the firmware is in process of aborting IO
*/
if (ctxp->flag & LPFC_NVMET_XBUSY) {
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
return;
}
ctxp->flag |= LPFC_NVMET_ABORT_OP;
if (ctxp->flag & LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
@ -636,13 +685,13 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
else
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid);
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
}

static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *rsp)
{
struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
struct lpfc_nvmet_rcv_ctx *ctxp =
container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
struct lpfc_hba *phba = ctxp->phba;
@ -650,27 +699,20 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
bool aborting = false;

spin_lock_irqsave(&ctxp->ctxlock, flags);
if (ctxp->flag & LPFC_NVMET_ABORT_OP) {
if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
(ctxp->flag & LPFC_NVMET_XBUSY)) {
aborting = true;
ctxp->flag |= LPFC_NVMET_CTX_RLS;
/* let the abort path do the real release */
lpfc_nvmet_defer_release(phba, ctxp);
}
spin_unlock_irqrestore(&ctxp->ctxlock, flags);

if (aborting)
/* let the abort path do the real release */
return;

/* Sanity check */
if (ctxp->state != LPFC_NVMET_STE_DONE) {
atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6117 Bad state IO x%x aborted\n",
ctxp->oxid);
}

lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid,
ctxp->state, 0);

if (aborting)
return;

lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
}

@ -708,8 +750,19 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
pinfo.port_id = vport->fc_myDID;

/* Limit to LPFC_MAX_NVME_SEG_CNT.
* For now need + 1 to get around NVME transport logic.
*/
if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
"6400 Reducing sg segment cnt to %d\n",
LPFC_MAX_NVME_SEG_CNT);
phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
} else {
phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
}
lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
lpfc_tgttemplate.max_sgl_segments = phba->cfg_sg_seg_cnt;
lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED |
NVMET_FCTGTFEAT_CMD_IN_ISR |
@ -794,7 +847,120 @@ void
lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri)
{
/* TODO: work in progress */
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
struct lpfc_nodelist *ndlp;
unsigned long iflag = 0;
int rrq_empty = 0;
bool released = false;

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);

if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return;
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
list_for_each_entry_safe(ctxp, next_ctxp,
&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
list) {
if (ctxp->rqb_buffer->sglq->sli4_xritag != xri)
continue;

/* Check if we already received a free context call
* and we have completed processing an abort situation.
*/
if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
!(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
list_del(&ctxp->list);
released = true;
}
ctxp->flag &= ~LPFC_NVMET_XBUSY;
spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);

rrq_empty = list_empty(&phba->active_rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
(ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
lpfc_set_rrq_active(phba, ndlp,
ctxp->rqb_buffer->sglq->sli4_lxritag,
rxid, 1);
lpfc_sli4_abts_err_handler(phba, ndlp, axri);
}

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6318 XB aborted %x flg x%x (%x)\n",
ctxp->oxid, ctxp->flag, released);
if (released)
lpfc_nvmet_rq_post(phba, ctxp,
&ctxp->rqb_buffer->hbuf);
if (rrq_empty)
lpfc_worker_wake_up(phba);
return;
}
spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
}

int
lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
struct fc_frame_header *fc_hdr)

{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
struct lpfc_hba *phba = vport->phba;
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
struct nvmefc_tgt_fcp_req *rsp;
uint16_t xri;
unsigned long iflag = 0;

xri = be16_to_cpu(fc_hdr->fh_ox_id);

spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
list_for_each_entry_safe(ctxp, next_ctxp,
&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
list) {
if (ctxp->rqb_buffer->sglq->sli4_xritag != xri)
continue;

spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);

spin_lock_irqsave(&ctxp->ctxlock, iflag);
ctxp->flag |= LPFC_NVMET_ABTS_RCV;
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

lpfc_nvmeio_data(phba,
"NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
xri, smp_processor_id(), 0);

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);

rsp = &ctxp->ctx.fcp_req;
nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);

/* Respond with BA_ACC accordingly */
lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
return 0;
}
spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);

lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
xri, smp_processor_id(), 1);

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);

/* Respond with BA_RJT accordingly */
lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
#endif
return 0;
}

void
@ -876,7 +1042,6 @@ dropit:
ctxp->wqeq = NULL;
ctxp->state = LPFC_NVMET_STE_RCV;
ctxp->rqb_buffer = (void *)nvmebuf;
spin_lock_init(&ctxp->ctxlock);

lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
oxid, size, sid);
@ -985,6 +1150,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->rqb_buffer = nvmebuf;
ctxp->entry_cnt = 1;
ctxp->flag = 0;
spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->ktime_on) {
@ -1007,8 +1173,8 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
}
#endif

lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d from %06x\n",
oxid, size, sid);
lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
oxid, size, smp_processor_id());

atomic_inc(&tgtp->rcv_fcp_cmd_in);
/*
@ -1282,11 +1448,11 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
return NULL;
}

if (rsp->sg_cnt > phba->cfg_sg_seg_cnt) {
if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: "
"NPORT x%x oxid:x%x\n",
ctxp->sid, ctxp->oxid);
"NPORT x%x oxid:x%x cnt %d\n",
ctxp->sid, ctxp->oxid, phba->cfg_nvme_seg_cnt);
return NULL;
}

@ -1648,18 +1814,27 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
atomic_inc(&tgtp->xmt_abort_cmpl);

lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6165 Abort cmpl: xri x%x WCQE: %08x %08x %08x %08x\n",
ctxp->oxid, wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);

ctxp->state = LPFC_NVMET_STE_DONE;

/* Check if we already received a free context call
* and we have completed processing an abort situation.
*/
spin_lock_irqsave(&ctxp->ctxlock, flags);
if (ctxp->flag & LPFC_NVMET_CTX_RLS)
if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
!(ctxp->flag & LPFC_NVMET_XBUSY)) {
list_del(&ctxp->list);
released = true;
}
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);

lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6165 ABORT cmpl: xri x%x flg x%x (%d) "
"WCQE: %08x %08x %08x %08x\n",
ctxp->oxid, ctxp->flag, released,
wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);

/*
* if transport has released ctx, then can reuse it. Otherwise,
* will be recycled by transport release call.
@ -1670,10 +1845,15 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
cmdwqe->context2 = NULL;
cmdwqe->context3 = NULL;
lpfc_sli_release_iocbq(phba, cmdwqe);

/* Since iaab/iaar are NOT set, there is no work left.
* For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
* should have been called already.
*/
}

/**
* lpfc_nvmet_xmt_fcp_abort_cmp - Completion handler for ABTS
* lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
* @wcqe: Pointer to driver response CQE object.
@ -1683,8 +1863,8 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
* The function frees memory resources used for the NVME commands.
**/
static void
lpfc_nvmet_xmt_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_wcqe_complete *wcqe)
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_wcqe_complete *wcqe)
{
struct lpfc_nvmet_rcv_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
@ -1699,35 +1879,55 @@ lpfc_nvmet_xmt_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
atomic_inc(&tgtp->xmt_abort_cmpl);

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6070 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
ctxp, wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);

if (ctxp) {
/* Sanity check */
if (ctxp->state != LPFC_NVMET_STE_ABORT) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6112 ABORT Wrong state:%d oxid x%x\n",
ctxp->state, ctxp->oxid);
}
ctxp->state = LPFC_NVMET_STE_DONE;
spin_lock_irqsave(&ctxp->ctxlock, flags);
if (ctxp->flag & LPFC_NVMET_CTX_RLS)
released = true;
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);

/*
* if transport has released ctx, then can reuse it. Otherwise,
* will be recycled by transport release call.
*/
if (released)
lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);

cmdwqe->context2 = NULL;
cmdwqe->context3 = NULL;
if (!ctxp) {
/* if context is clear, related io alrady complete */
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);
return;
}

/* Sanity check */
if (ctxp->state != LPFC_NVMET_STE_ABORT) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6112 ABTS Wrong state:%d oxid x%x\n",
ctxp->state, ctxp->oxid);
}

/* Check if we already received a free context call
* and we have completed processing an abort situation.
*/
ctxp->state = LPFC_NVMET_STE_DONE;
spin_lock_irqsave(&ctxp->ctxlock, flags);
if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
!(ctxp->flag & LPFC_NVMET_XBUSY)) {
list_del(&ctxp->list);
released = true;
}
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6316 ABTS cmpl xri x%x flg x%x (%x) "
"WCQE: %08x %08x %08x %08x\n",
ctxp->oxid, ctxp->flag, released,
wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);
/*
* if transport has released ctx, then can reuse it. Otherwise,
* will be recycled by transport release call.
*/
if (released)
lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);

cmdwqe->context2 = NULL;
cmdwqe->context3 = NULL;

/* Since iaab/iaar are NOT set, there is no work left.
* For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
* should have been called already.
*/
}

/**
@ -1780,10 +1980,14 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
struct lpfc_nodelist *ndlp;

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6067 Abort: sid %x xri x%x/x%x\n",
"6067 ABTS: sid %x xri x%x/x%x\n",
sid, xri, ctxp->wqeq->sli4_xritag);

tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
if (!ctxp->wqeq) {
ctxp->wqeq = ctxp->rqb_buffer->iocbq;
ctxp->wqeq->hba_wqidx = 0;
}

ndlp = lpfc_findnode_did(phba->pport, sid);
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
@ -1889,10 +2093,11 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
"6160 Drop ABTS - wrong NDLP state x%x.\n",
"6160 Drop ABORT - wrong NDLP state x%x.\n",
(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

/* No failure to an ABTS request. */
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
return 0;
}

@ -1900,9 +2105,10 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
if (!ctxp->abort_wqeq) {
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
"6161 Abort failed: No wqeqs: "
"6161 ABORT failed: No wqeqs: "
"xri: x%x\n", ctxp->oxid);
/* No failure to an ABTS request. */
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
return 0;
}
abts_wqeq = ctxp->abort_wqeq;
@ -1910,8 +2116,8 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
ctxp->state = LPFC_NVMET_STE_ABORT;

/* Announce entry to new IO submit field. */
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6162 Abort Request to rport DID x%06x "
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6162 ABORT Request to rport DID x%06x "
"for xri x%x x%x\n",
ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);

@ -1927,6 +2133,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
"NVME Req now. hba_flag x%x oxid x%x\n",
phba->hba_flag, ctxp->oxid);
lpfc_sli_release_iocbq(phba, abts_wqeq);
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
return 0;
}

@ -1938,6 +2145,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
"still pending on oxid x%x\n",
ctxp->oxid);
lpfc_sli_release_iocbq(phba, abts_wqeq);
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
return 0;
}

@ -1985,9 +2193,10 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
if (rc == WQE_SUCCESS)
return 0;

ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
lpfc_sli_release_iocbq(phba, abts_wqeq);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
"6166 Failed abts issue_wqe with status x%x "
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6166 Failed ABORT issue_wqe with status x%x "
"for oxid x%x.\n",
rc, ctxp->oxid);
return 1;
@ -2016,8 +2225,8 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,

spin_lock_irqsave(&phba->hbalock, flags);
abts_wqeq = ctxp->wqeq;
abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_abort_cmp;
abts_wqeq->iocb_cmpl = 0;
abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
abts_wqeq->iocb_cmpl = NULL;
abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
@ -2027,7 +2236,7 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
}

aerr:
lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",

@ -21,9 +21,7 @@
* included with this package. *
********************************************************************/

#define LPFC_NVMET_MIN_SEGS 16
#define LPFC_NVMET_DEFAULT_SEGS 64 /* 256K IOs */
#define LPFC_NVMET_MAX_SEGS 510
#define LPFC_NVMET_DEFAULT_SEGS (64 + 1) /* 256K IOs */
#define LPFC_NVMET_SUCCESS_LEN 12

/* Used for NVME Target */
@ -77,6 +75,7 @@ struct lpfc_nvmet_rcv_ctx {
struct nvmefc_tgt_ls_req ls_req;
struct nvmefc_tgt_fcp_req fcp_req;
} ctx;
struct list_head list;
struct lpfc_hba *phba;
struct lpfc_iocbq *wqeq;
struct lpfc_iocbq *abort_wqeq;
@ -98,10 +97,11 @@ struct lpfc_nvmet_rcv_ctx {
#define LPFC_NVMET_STE_RSP 4
#define LPFC_NVMET_STE_DONE 5
uint16_t flag;
#define LPFC_NVMET_IO_INP 0x1
#define LPFC_NVMET_ABORT_OP 0x2
#define LPFC_NVMET_CTX_RLS 0x4

#define LPFC_NVMET_IO_INP 0x1 /* IO is in progress on exchange */
#define LPFC_NVMET_ABORT_OP 0x2 /* Abort WQE issued on exchange */
#define LPFC_NVMET_XBUSY 0x4 /* XB bit set on IO cmpl */
#define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */
#define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */
struct rqb_dmabuf *rqb_buffer;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

@ -6338,7 +6338,7 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
}

/**
* lpfc_sli4_repost_sgl_list - Repsot the buffers sgl pages as block
* lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
* @phba: pointer to lpfc hba data structure.
* @pring: Pointer to driver SLI ring object.
* @sgl_list: linked link of sgl buffers to post
@ -13758,7 +13758,10 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
lpfc_free_rq_buffer(queue->phba, queue);
kfree(queue->rqbp);
}
kfree(queue->pring);

if (!list_empty(&queue->wq_list))
list_del(&queue->wq_list);

kfree(queue);
return;
}
@ -14738,6 +14741,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
case LPFC_Q_CREATE_VERSION_1:
bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
wq->entry_count);
bf_set(lpfc_mbox_hdr_version, &shdr->request,
LPFC_Q_CREATE_VERSION_1);

switch (wq->entry_size) {
default:
case 64:
@ -15561,6 +15567,8 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
}
/* Remove wq from any list */
list_del_init(&wq->list);
kfree(wq->pring);
wq->pring = NULL;
mempool_free(mbox, wq->phba->mbox_mem_pool);
return status;
}
@ -16513,7 +16521,7 @@ lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
* This function sends a basic response to a previous unsol sequence abort
* event after aborting the sequence handling.
**/
static void
void
lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
struct fc_frame_header *fc_hdr, bool aborted)
{
@ -16534,14 +16542,13 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,

ndlp = lpfc_findnode_did(vport, sid);
if (!ndlp) {
ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
ndlp = lpfc_nlp_init(vport, sid);
if (!ndlp) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
"1268 Failed to allocate ndlp for "
"oxid:x%x SID:x%x\n", oxid, sid);
return;
}
lpfc_nlp_init(vport, ndlp, sid);
/* Put ndlp onto pport node list */
lpfc_enqueue_node(vport, ndlp);
} else if (!NLP_CHK_NODE_ACT(ndlp)) {
@ -16690,6 +16697,11 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
}
lpfc_in_buf_free(phba, &dmabuf->dbuf);

if (phba->nvmet_support) {
lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
return;
}

/* Respond with BA_ACC or BA_RJT accordingly */
lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
}

@ -620,7 +620,7 @@ struct lpfc_sli4_hba {
struct list_head lpfc_els_sgl_list;
struct list_head lpfc_abts_els_sgl_list;
struct list_head lpfc_nvmet_sgl_list;
struct list_head lpfc_abts_nvmet_sgl_list;
struct list_head lpfc_abts_nvmet_ctx_list;
struct list_head lpfc_abts_scsi_buf_list;
struct list_head lpfc_abts_nvme_buf_list;
struct lpfc_sglq **lpfc_sglq_active_list;

@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/

#define LPFC_DRIVER_VERSION "11.2.0.10"
#define LPFC_DRIVER_VERSION "11.2.0.12"
#define LPFC_DRIVER_NAME "lpfc"

/* Used for SLI 2/3 */

@ -738,10 +738,9 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp) {
/* Cannot find existing Fabric ndlp, allocate one */
ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
ndlp = lpfc_nlp_init(vport, Fabric_DID);
if (!ndlp)
goto skip_logo;
lpfc_nlp_init(vport, ndlp, Fabric_DID);
/* Indicate free memory when release */
NLP_SET_FREE_REQ(ndlp);
} else {