qla2xxx: Move atioq to a different lock to reduce lock contention

99% of the time the ATIO queue carries a SCSI command; the other 1% of
the time it carries something else. Most of the time this interrupt does
not need to hold the hardware_lock, so move ATIO queue processing under
a separate lock (ha->tgt.atio_lock) to reduce hardware_lock contention.

Signed-off-by: Quinn Tran <quinn.tran@qlogic.com>
Signed-off-by: Himanshu Madhani <himanshu.madhani@qlogic.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
commit 2f424b9b36 (parent fb3269baf4)
Author: Quinn Tran, 2015-12-17 14:57:07 -05:00
Committed by: Nicholas Bellinger
7 changed files with 129 additions and 28 deletions
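For readers who want the locking idea in isolation, here is a minimal userspace sketch of the split this patch makes. It is not driver code: pthread mutexes stand in for the kernel spinlocks and the function names are illustrative only. The common case (a SCSI command on the ATIO ring) is handled entirely under the new atio_lock; hardware_lock is taken, nested, only for the rare entries that need it.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hardware_lock = PTHREAD_MUTEX_INITIALIZER; /* pre-existing, global */
static pthread_mutex_t atio_lock = PTHREAD_MUTEX_INITIALIZER;     /* new, ATIO ring only */

/* ~99% case: a plain SCSI command; only the ATIO ring needs protection. */
static void atio_hot_path(void)
{
	pthread_mutex_lock(&atio_lock);
	printf("drain ATIO entry under atio_lock only\n");
	pthread_mutex_unlock(&atio_lock);
}

/* ~1% case: the entry also needs state guarded by hardware_lock,
 * which is taken briefly, nested inside atio_lock. */
static void atio_rare_path(void)
{
	pthread_mutex_lock(&atio_lock);
	pthread_mutex_lock(&hardware_lock);
	printf("rare entry: hardware_lock nested inside atio_lock\n");
	pthread_mutex_unlock(&hardware_lock);
	pthread_mutex_unlock(&atio_lock);
}

int main(void)
{
	atio_hot_path();
	atio_rare_path();
	return 0;
}

The diff below applies the same idea: the ATIO interrupt handlers and qla83xx_msix_atio_q() take ha->tgt.atio_lock, and qlt_24xx_process_atio_queue() gains an ha_locked flag so qlt_24xx_atio_pkt() only takes the hardware_lock itself (for busy/term responses and immediate notifies) when the caller does not already hold it.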

@@ -2936,6 +2936,7 @@ struct qlt_hw_data {
uint32_t leak_exchg_thresh_hold;
spinlock_t sess_lock;
int rspq_vector_cpuid;
spinlock_t atio_lock ____cacheline_aligned;
};
#define MAX_QFULL_CMDS_ALLOC 8192

@@ -778,5 +778,6 @@ extern int qla_get_exlogin_status(scsi_qla_host_t *, uint16_t *,
extern int qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr);
extern int qla_get_exchoffld_status(scsi_qla_host_t *, uint16_t *, uint16_t *);
extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *, dma_addr_t);
extern void qlt_handle_abts_recv(struct scsi_qla_host *, response_t *);
#endif /* _QLA_GBL_H */

@@ -4919,7 +4919,7 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct rsp_que *rsp = ha->rsp_q_map[0];
unsigned long flags;
unsigned long flags, flags2;
/* If firmware needs to be loaded */
if (qla2x00_isp_firmware(vha)) {
@@ -4948,8 +4948,10 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
* while we weren't online.
*/
spin_lock_irqsave(&ha->hardware_lock, flags);
spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
if (qla_tgt_mode_enabled(vha))
qlt_24xx_process_atio_queue(vha);
qlt_24xx_process_atio_queue(vha, 1);
spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

@@ -2605,8 +2605,14 @@ process_err:
qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
break;
case ABTS_RECV_24XX:
if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
/* ensure that the ATIO queue is empty */
qlt_24xx_process_atio_queue(vha);
qlt_handle_abts_recv(vha, (response_t *)pkt);
break;
} else {
/* drop through */
qlt_24xx_process_atio_queue(vha, 1);
}
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case NOTIFY_ACK_TYPE:
@@ -2773,13 +2779,22 @@ qla24xx_intr_handler(int irq, void *dev_id)
case INTR_RSP_QUE_UPDATE_83XX:
qla24xx_process_response_queue(vha, rsp);
break;
case INTR_ATIO_QUE_UPDATE:
qlt_24xx_process_atio_queue(vha);
case INTR_ATIO_QUE_UPDATE:{
unsigned long flags2;
spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
qlt_24xx_process_atio_queue(vha, 1);
spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
break;
case INTR_ATIO_RSP_QUE_UPDATE:
qlt_24xx_process_atio_queue(vha);
}
case INTR_ATIO_RSP_QUE_UPDATE: {
unsigned long flags2;
spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
qlt_24xx_process_atio_queue(vha, 1);
spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
qla24xx_process_response_queue(vha, rsp);
break;
}
default:
ql_dbg(ql_dbg_async, vha, 0x504f,
"Unrecognized interrupt type (%d).\n", stat * 0xff);
@@ -2938,13 +2953,22 @@ qla24xx_msix_default(int irq, void *dev_id)
case INTR_RSP_QUE_UPDATE_83XX:
qla24xx_process_response_queue(vha, rsp);
break;
case INTR_ATIO_QUE_UPDATE:
qlt_24xx_process_atio_queue(vha);
case INTR_ATIO_QUE_UPDATE:{
unsigned long flags2;
spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
qlt_24xx_process_atio_queue(vha, 1);
spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
break;
case INTR_ATIO_RSP_QUE_UPDATE:
qlt_24xx_process_atio_queue(vha);
}
case INTR_ATIO_RSP_QUE_UPDATE: {
unsigned long flags2;
spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
qlt_24xx_process_atio_queue(vha, 1);
spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
qla24xx_process_response_queue(vha, rsp);
break;
}
default:
ql_dbg(ql_dbg_async, vha, 0x5051,
"Unrecognized interrupt type (%d).\n", stat & 0xff);

@@ -2337,6 +2337,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&ha->tgt.q_full_list);
spin_lock_init(&ha->tgt.q_full_lock);
spin_lock_init(&ha->tgt.sess_lock);
spin_lock_init(&ha->tgt.atio_lock);
/* Clear our data area */
ha->bars = bars;

@@ -100,7 +100,7 @@ enum fcp_resp_rsp_codes {
*/
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
struct atio_from_isp *pkt);
struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
int fn, void *iocb, int flags);
@@ -230,7 +230,7 @@ static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
}
static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
struct atio_from_isp *atio)
struct atio_from_isp *atio, uint8_t ha_locked)
{
ql_dbg(ql_dbg_tgt, vha, 0xe072,
"%s: qla_target(%d): type %x ox_id %04x\n",
@@ -251,7 +251,7 @@ static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
atio->u.isp24.fcp_hdr.d_id[2]);
break;
}
qlt_24xx_atio_pkt(host, atio);
qlt_24xx_atio_pkt(host, atio, ha_locked);
break;
}
@@ -274,7 +274,7 @@ static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
break;
}
}
qlt_24xx_atio_pkt(host, atio);
qlt_24xx_atio_pkt(host, atio, ha_locked);
break;
}
@@ -1211,7 +1211,7 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
mutex_lock(&vha->vha_tgt.tgt_mutex);
spin_lock_irqsave(&ha->hardware_lock, flags);
while (tgt->irq_cmd_count != 0) {
while ((tgt->irq_cmd_count != 0) || (tgt->atio_irq_cmd_count != 0)) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
udelay(2);
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -5350,11 +5350,12 @@ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
struct atio_from_isp *atio)
struct atio_from_isp *atio, uint8_t ha_locked)
{
struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
int rc;
unsigned long flags;
if (unlikely(tgt == NULL)) {
ql_dbg(ql_dbg_io, vha, 0x3064,
@@ -5366,7 +5367,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
* Otherwise, some commands can stuck.
*/
tgt->irq_cmd_count++;
tgt->atio_irq_cmd_count++;
switch (atio->u.raw.entry_type) {
case ATIO_TYPE7:
@@ -5376,7 +5377,11 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
"qla_target(%d): ATIO_TYPE7 "
"received with UNKNOWN exchange address, "
"sending QUEUE_FULL\n", vha->vp_idx);
if (!ha_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
if (!ha_locked)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
break;
}
@@ -5385,7 +5390,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
rc = qlt_chk_qfull_thresh_hold(vha, atio);
if (rc != 0) {
tgt->irq_cmd_count--;
tgt->atio_irq_cmd_count--;
return;
}
rc = qlt_handle_cmd_for_atio(vha, atio);
@@ -5394,11 +5399,20 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
}
if (unlikely(rc != 0)) {
if (rc == -ESRCH) {
if (!ha_locked)
spin_lock_irqsave
(&ha->hardware_lock, flags);
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
qlt_send_busy(vha, atio, SAM_STAT_BUSY);
#else
qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
if (!ha_locked)
spin_unlock_irqrestore
(&ha->hardware_lock, flags);
} else {
if (tgt->tgt_stop) {
ql_dbg(ql_dbg_tgt, vha, 0xe059,
@@ -5410,7 +5424,13 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
"qla_target(%d): Unable to send "
"command to target, sending BUSY "
"status.\n", vha->vp_idx);
if (!ha_locked)
spin_lock_irqsave(
&ha->hardware_lock, flags);
qlt_send_busy(vha, atio, SAM_STAT_BUSY);
if (!ha_locked)
spin_unlock_irqrestore(
&ha->hardware_lock, flags);
}
}
}
@@ -5427,7 +5447,12 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
break;
}
ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
if (!ha_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
if (!ha_locked)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
break;
}
@@ -5438,7 +5463,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
break;
}
tgt->irq_cmd_count--;
tgt->atio_irq_cmd_count--;
}
/* ha->hardware_lock supposed to be held on entry */
@@ -6384,7 +6409,7 @@ qlt_init_atio_q_entries(struct scsi_qla_host *vha)
* @ha: SCSI driver HA context
*/
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
{
struct qla_hw_data *ha = vha->hw;
struct atio_from_isp *pkt;
@@ -6397,7 +6422,8 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
cnt = pkt->u.raw.entry_count;
qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);
qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt,
ha_locked);
for (i = 0; i < cnt; i++) {
ha->tgt.atio_ring_index++;
@@ -6681,16 +6707,59 @@ qla83xx_msix_atio_q(int irq, void *dev_id)
ha = rsp->hw;
vha = pci_get_drvdata(ha->pdev);
spin_lock_irqsave(&ha->hardware_lock, flags);
spin_lock_irqsave(&ha->tgt.atio_lock, flags);
qlt_24xx_process_atio_queue(vha);
qla24xx_process_response_queue(vha, rsp);
qlt_24xx_process_atio_queue(vha, 0);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
return IRQ_HANDLED;
}
static void
qlt_handle_abts_recv_work(struct work_struct *work)
{
struct qla_tgt_sess_op *op = container_of(work,
struct qla_tgt_sess_op, work);
scsi_qla_host_t *vha = op->vha;
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
if (qla2x00_reset_active(vha) || (op->chip_reset != ha->chip_reset))
return;
spin_lock_irqsave(&ha->tgt.atio_lock, flags);
qlt_24xx_process_atio_queue(vha, 0);
spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
void
qlt_handle_abts_recv(struct scsi_qla_host *vha, response_t *pkt)
{
struct qla_tgt_sess_op *op;
op = kzalloc(sizeof(*op), GFP_ATOMIC);
if (!op) {
/* do not reach for ATIO queue here. This is best effort err
* recovery at this point.
*/
qlt_response_pkt_all_vps(vha, pkt);
return;
}
memcpy(&op->atio, pkt, sizeof(*pkt));
op->vha = vha;
op->chip_reset = vha->hw->chip_reset;
INIT_WORK(&op->work, qlt_handle_abts_recv_work);
queue_work(qla_tgt_wq, &op->work);
return;
}
int
qlt_mem_alloc(struct qla_hw_data *ha)
{

@@ -835,6 +835,7 @@ struct qla_tgt {
* HW lock.
*/
int irq_cmd_count;
int atio_irq_cmd_count;
int datasegs_per_cmd, datasegs_per_cont, sg_tablesize;
@@ -883,6 +884,7 @@ struct qla_tgt {
struct qla_tgt_sess_op {
struct scsi_qla_host *vha;
uint32_t chip_reset;
struct atio_from_isp atio;
struct work_struct work;
struct list_head cmd_list;
@@ -1155,7 +1157,7 @@ extern void qlt_enable_vha(struct scsi_qla_host *);
extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *);
extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *, uint8_t);
extern void qlt_24xx_config_rings(struct scsi_qla_host *);
extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *,
struct nvram_24xx *);