scsi: lpfc: Fix NMI crash during rmmod due to circular hbalock dependency
Remove the hbalock dependency for lpfc_abts_els_sgl_list and
lpfc_abts_nvmet_ctx_list. The lists are adequately synchronized by the
sgl_list_lock and abts_nvmet_buf_list_lock, respectively.

Link: https://lore.kernel.org/r/20210412013127.2387-5-jsmart2021@gmail.com
Co-developed-by: Justin Tee <justin.tee@broadcom.com>
Signed-off-by: Justin Tee <justin.tee@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent f866eb06c0
commit a789241e49
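Every hunk below applies the same substitution: where a code path used to take
hbalock and then nest the list-specific lock inside it, it now takes the
list-specific lock alone, moving the irqsave/irq semantics onto that lock. A
minimal kernel-style sketch of the before and after pattern (the demo_hba
struct and function names are illustrative, not the driver's actual layout;
locks are assumed initialized with spin_lock_init() during setup):

#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_hba {
	spinlock_t hbalock;		/* broad driver lock */
	spinlock_t sgl_list_lock;	/* lock that actually guards the list */
	struct list_head abts_sgl_list;
};

/* Before: nests the list lock inside hbalock, creating an ordering
 * dependency between the two locks with no added protection.
 */
static void walk_list_old(struct demo_hba *hba)
{
	unsigned long iflag;

	spin_lock_irqsave(&hba->hbalock, iflag);
	spin_lock(&hba->sgl_list_lock);
	/* ... walk hba->abts_sgl_list ... */
	spin_unlock(&hba->sgl_list_lock);
	spin_unlock_irqrestore(&hba->hbalock, iflag);
}

/* After: only the list lock is taken, with irqsave moved onto it so the
 * critical section still runs with local interrupts disabled.
 */
static void walk_list_new(struct demo_hba *hba)
{
	unsigned long iflag;

	spin_lock_irqsave(&hba->sgl_list_lock, iflag);
	/* ... walk hba->abts_sgl_list ... */
	spin_unlock_irqrestore(&hba->sgl_list_lock, iflag);
}

Dropping the outer lock removes hbalock from the lock-ordering graph for these
list walks, which is what breaks the cycle named in the subject line.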
drivers/scsi/lpfc/lpfc_sli.c

@@ -10072,8 +10072,7 @@ lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
 	unsigned long iflag = 0;
 
-	spin_lock_irqsave(&phba->hbalock, iflag);
-	spin_lock(&phba->sli4_hba.sgl_list_lock);
+	spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
 	list_for_each_entry_safe(sglq_entry, sglq_next,
 			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
 		if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) {
@@ -10081,8 +10080,7 @@ lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
 			sglq_entry->ndlp = NULL;
 		}
 	}
-	spin_unlock(&phba->sli4_hba.sgl_list_lock);
-	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
 	return;
 }
 
@@ -10109,8 +10107,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
 
 	pring = lpfc_phba_elsring(phba);
 
-	spin_lock_irqsave(&phba->hbalock, iflag);
-	spin_lock(&phba->sli4_hba.sgl_list_lock);
+	spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
 	list_for_each_entry_safe(sglq_entry, sglq_next,
 			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
 		if (sglq_entry->sli4_xritag == xri) {
@@ -10120,8 +10117,8 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
 			list_add_tail(&sglq_entry->list,
 				&phba->sli4_hba.lpfc_els_sgl_list);
 			sglq_entry->state = SGL_FREED;
-			spin_unlock(&phba->sli4_hba.sgl_list_lock);
-			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
+					       iflag);
 
 			if (ndlp) {
 				lpfc_set_rrq_active(phba, ndlp,
@@ -10136,21 +10133,18 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
 			return;
 		}
 	}
-	spin_unlock(&phba->sli4_hba.sgl_list_lock);
+	spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
 	lxri = lpfc_sli4_xri_inrange(phba, xri);
-	if (lxri == NO_XRI) {
-		spin_unlock_irqrestore(&phba->hbalock, iflag);
+	if (lxri == NO_XRI)
 		return;
-	}
-	spin_lock(&phba->sli4_hba.sgl_list_lock);
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
 	sglq_entry = __lpfc_get_active_sglq(phba, lxri);
 	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
-		spin_unlock(&phba->sli4_hba.sgl_list_lock);
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
 		return;
 	}
 	sglq_entry->state = SGL_XRI_ABORTED;
-	spin_unlock(&phba->sli4_hba.sgl_list_lock);
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
 	return;
 }
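On the circular dependency named in the subject: once two code paths acquire
the same pair of locks in opposite orders, each CPU can take its first lock
and then spin forever on the second with interrupts disabled, and the
hard-lockup watchdog eventually reports it via NMI. The failure class is
easiest to see in user space; a hypothetical standalone pthread demonstration
of the same ABBA pattern (build with cc abba.c -pthread; it intentionally
never exits):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* "hbalock" */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* "sgl_list_lock" */

static void *path_one(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock_a);	/* holds A */
	sleep(1);			/* widen the race window */
	pthread_mutex_lock(&lock_b);	/* blocks on B forever */
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
	return NULL;
}

static void *path_two(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock_b);	/* holds B */
	sleep(1);
	pthread_mutex_lock(&lock_a);	/* blocks on A forever */
	pthread_mutex_unlock(&lock_a);
	pthread_mutex_unlock(&lock_b);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, path_one, NULL);
	pthread_create(&t2, NULL, path_two, NULL);
	pthread_join(t1, NULL);		/* never returns: classic ABBA */
	pthread_join(t2, NULL);
	puts("unreached");
	return 0;
}

With kernel spinlocks the CPUs spin instead of sleeping, so the inversion
hard-locks the machine rather than merely hanging two threads.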
drivers/scsi/lpfc/lpfc_init.c

@@ -1043,12 +1043,11 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 	 * driver is unloading or reposted if the driver is restarting
 	 * the port.
 	 */
-	spin_lock_irq(&phba->hbalock);  /* required for lpfc_els_sgl_list and */
-					/* scsl_buf_list */
+
 	/* sgl_list_lock required because worker thread uses this
 	 * list.
 	 */
-	spin_lock(&phba->sli4_hba.sgl_list_lock);
+	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
 	list_for_each_entry(sglq_entry,
 		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
 		sglq_entry->state = SGL_FREED;
@@ -1057,11 +1056,12 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
 			 &phba->sli4_hba.lpfc_els_sgl_list);
 
-	spin_unlock(&phba->sli4_hba.sgl_list_lock);
+	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
 
 	/* abts_xxxx_buf_list_lock required because worker thread uses this
 	 * list.
 	 */
+	spin_lock_irq(&phba->hbalock);
 	cnt = 0;
 	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
 		qp = &phba->sli4_hba.hdwq[idx];
@@ -3804,12 +3804,10 @@ lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
 			sglq_entry->state = SGL_FREED;
 			list_add_tail(&sglq_entry->list, &els_sgl_list);
 		}
-		spin_lock_irq(&phba->hbalock);
-		spin_lock(&phba->sli4_hba.sgl_list_lock);
+		spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
 		list_splice_init(&els_sgl_list,
 				 &phba->sli4_hba.lpfc_els_sgl_list);
-		spin_unlock(&phba->sli4_hba.sgl_list_lock);
-		spin_unlock_irq(&phba->hbalock);
+		spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
 	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
 		/* els xri-sgl shrinked */
 		xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
@@ -3817,8 +3815,7 @@ lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
 				"3158 ELS xri-sgl count decreased from "
 				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
 				els_xri_cnt);
-		spin_lock_irq(&phba->hbalock);
-		spin_lock(&phba->sli4_hba.sgl_list_lock);
+		spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
 		list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
 				 &els_sgl_list);
 		/* release extra els sgls from list */
@@ -3833,8 +3830,7 @@ lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
 		}
 		list_splice_init(&els_sgl_list,
 				 &phba->sli4_hba.lpfc_els_sgl_list);
-		spin_unlock(&phba->sli4_hba.sgl_list_lock);
-		spin_unlock_irq(&phba->hbalock);
+		spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
 	} else
 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 				"3163 ELS xri-sgl count unchanged: %d\n",
@@ -7388,11 +7384,9 @@ lpfc_free_els_sgl_list(struct lpfc_hba *phba)
 	LIST_HEAD(sglq_list);
 
 	/* Retrieve all els sgls from driver list */
-	spin_lock_irq(&phba->hbalock);
-	spin_lock(&phba->sli4_hba.sgl_list_lock);
+	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
 	list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
-	spin_unlock(&phba->sli4_hba.sgl_list_lock);
-	spin_unlock_irq(&phba->hbalock);
+	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
 
 	/* Now free the sgl list */
 	lpfc_free_sgl_list(phba, &sglq_list);
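The lpfc_init.c paths above run in process context with interrupts enabled, so
they can use the cheaper spin_lock_irq()/spin_unlock_irq() pair, while the
abort handlers in the other files keep spin_lock_irqsave() because they may be
entered with interrupts already disabled. A small illustrative sketch of that
choice (demo_lock and both function names are hypothetical):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Process context, interrupts known to be enabled on entry: the _irq
 * variant suffices; it unconditionally re-enables interrupts on unlock.
 */
static void from_process_context(void)
{
	spin_lock_irq(&demo_lock);
	/* ... critical section ... */
	spin_unlock_irq(&demo_lock);
}

/* Callable where the interrupt state is unknown (e.g. completion and
 * abort paths): _irqsave preserves and restores the caller's state.
 */
static void from_any_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}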
drivers/scsi/lpfc/lpfc_nvmet.c

@@ -1440,7 +1440,10 @@ __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
 		list_del_init(&ctx_buf->list);
 		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 
+		spin_lock(&phba->hbalock);
 		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
+		spin_unlock(&phba->hbalock);
+
 		ctx_buf->sglq->state = SGL_FREED;
 		ctx_buf->sglq->ndlp = NULL;
 
@@ -1787,8 +1790,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
 		atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
 	}
 
-	spin_lock_irqsave(&phba->hbalock, iflag);
-	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+	spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
 	list_for_each_entry_safe(ctxp, next_ctxp,
 			&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
 			list) {
@@ -1806,10 +1808,10 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
 		}
 		ctxp->flag &= ~LPFC_NVME_XBUSY;
 		spin_unlock(&ctxp->ctxlock);
-		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
+				       iflag);
 
 		rrq_empty = list_empty(&phba->active_rrq_list);
-		spin_unlock_irqrestore(&phba->hbalock, iflag);
 		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
 		if (ndlp &&
 		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
@@ -1830,9 +1832,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
 		lpfc_worker_wake_up(phba);
 		return;
 	}
-	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
-	spin_unlock_irqrestore(&phba->hbalock, iflag);
-
+	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
 	ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
 	if (ctxp) {
 		/*
@@ -1876,8 +1876,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 	sid = sli4_sid_from_fc_hdr(fc_hdr);
 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
 
-	spin_lock_irqsave(&phba->hbalock, iflag);
-	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+	spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
 	list_for_each_entry_safe(ctxp, next_ctxp,
 			&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
 			list) {
@@ -1886,9 +1885,8 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 
 		xri = ctxp->ctxbuf->sglq->sli4_xritag;
 
-		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
-		spin_unlock_irqrestore(&phba->hbalock, iflag);
-
+		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
+				       iflag);
 		spin_lock_irqsave(&ctxp->ctxlock, iflag);
 		ctxp->flag |= LPFC_NVME_ABTS_RCV;
 		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
@@ -1907,9 +1905,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
 		return 0;
 	}
-	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
-	spin_unlock_irqrestore(&phba->hbalock, iflag);
-
+	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
 	/* check the wait list */
 	if (phba->sli4_hba.nvmet_io_wait_cnt) {
 		struct rqb_dmabuf *nvmebuf;
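Note that hbalock is not eliminated: __lpfc_nvmet_clean_io_for_cpu() above now
takes it, but only around __lpfc_clear_active_sglq(), and never while a list
lock is held. The resulting discipline, sketched below with hypothetical
reduced types (demo_hba, demo_buf, and a bitmap standing in for the
active-sglq table), is to take each lock strictly around the data it guards,
sequentially rather than nested:

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_buf {
	struct list_head list;
	u16 tag;
};

struct demo_hba {
	spinlock_t hbalock;		/* guards the active-tag table */
	spinlock_t buf_list_lock;	/* guards the abts buffer list */
	unsigned long *active_tags;	/* stand-in for the sglq lookup table */
};

/* Each lock is held only for the data it protects; neither is ever
 * acquired while the other is held, so no ordering edge exists.
 */
static void demo_clean_buf(struct demo_hba *hba, struct demo_buf *buf)
{
	spin_lock(&hba->buf_list_lock);
	list_del_init(&buf->list);
	spin_unlock(&hba->buf_list_lock);

	spin_lock(&hba->hbalock);
	clear_bit(buf->tag, hba->active_tags);
	spin_unlock(&hba->hbalock);
}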