linux/drivers/scsi/qla2xxx/qla_iocb.c
Madhuranath Iyengar 3822263eb1 [SCSI] qla2xxx: Support for asynchronous TM and Marker IOCBs.
Currently we can only issue task management (TM)
commands via the mailbox mechanism. This is a limitation,
since only one mailbox command can be issued at a time.
The purpose of this effort is to provide support for
issuing TM and Marker IOCBs and processing their
responses asynchronously. Towards achieving this, the
consolidated SRB architecture already used for BSG and
IOCB/Logio commands has been enhanced and reused.

Signed-off-by: Giridhar Malavali <giridhar.malavali@qlogic.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
2010-05-16 18:21:57 -04:00


/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2008 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_tcq.h>
static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
struct rsp_que *);
static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
* qla2x00_get_cmd_direction() - Determine control_flags data direction.
* @sp: SCSI Request Block for the command
*
* Returns the proper CF_* direction based on the command's data direction.
*/
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
uint16_t cflags;
cflags = 0;
/* Set transfer direction */
if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
cflags = CF_WRITE;
sp->fcport->vha->hw->qla_stats.output_bytes +=
scsi_bufflen(sp->cmd);
} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
cflags = CF_READ;
sp->fcport->vha->hw->qla_stats.input_bytes +=
scsi_bufflen(sp->cmd);
}
return (cflags);
}
/**
* qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
* Continuation Type 0 IOCBs to allocate.
*
* @dsds: number of data segment descriptors needed
*
* Returns the number of IOCB entries needed to store @dsds.
*/
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
uint16_t iocbs;
iocbs = 1;
if (dsds > 3) {
iocbs += (dsds - 3) / 7;
if ((dsds - 3) % 7)
iocbs++;
}
return (iocbs);
}
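/*
* Worked example (illustrative): qla2x00_calc_iocbs_32(10) == 2, since
* the Command Type 2 IOCB itself carries 3 DSDs and a single
* Continuation Type 0 IOCB carries the remaining 7.
*/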
/**
* qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
* Continuation Type 1 IOCBs to allocate.
*
* @dsds: number of data segment descriptors needed
*
* Returns the number of IOCB entries needed to store @dsds.
*/
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
uint16_t iocbs;
iocbs = 1;
if (dsds > 2) {
iocbs += (dsds - 2) / 5;
if ((dsds - 2) % 5)
iocbs++;
}
return (iocbs);
}
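/*
* Worked example (illustrative): qla2x00_calc_iocbs_64(8) == 3, since
* the Command Type 3 IOCB carries 2 DSDs and the remaining 6 span two
* Continuation Type 1 IOCBs (5 + 1).
*/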
/**
* qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
* @vha: HA context
*
* Returns a pointer to the Continuation Type 0 IOCB packet.
*/
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
cont_entry_t *cont_pkt;
struct req_que *req = vha->req;
/* Adjust ring index. */
req->ring_index++;
if (req->ring_index == req->length) {
req->ring_index = 0;
req->ring_ptr = req->ring;
} else {
req->ring_ptr++;
}
cont_pkt = (cont_entry_t *)req->ring_ptr;
/* Load packet defaults. */
*((uint32_t *)(&cont_pkt->entry_type)) =
__constant_cpu_to_le32(CONTINUE_TYPE);
return (cont_pkt);
}
/**
* qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
* @vha: HA context
*
* Returns a pointer to the continuation type 1 IOCB packet.
*/
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
cont_a64_entry_t *cont_pkt;
struct req_que *req = vha->req;
/* Adjust ring index. */
req->ring_index++;
if (req->ring_index == req->length) {
req->ring_index = 0;
req->ring_ptr = req->ring;
} else {
req->ring_ptr++;
}
cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
/* Load packet defaults. */
*((uint32_t *)(&cont_pkt->entry_type)) =
__constant_cpu_to_le32(CONTINUE_A64_TYPE);
return (cont_pkt);
}
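/*
* Note: both qla2x00_prep_cont_type{0,1}_iocb() helpers advance the
* circular request ring by one entry, wrapping ring_ptr back to the ring
* base once ring_index reaches the ring length. Callers are expected to
* have reserved enough entries up front (see the req->cnt accounting in
* the qla2x00_start_scsi() and qla24xx_start_scsi() paths).
*/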
/**
* qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32-bit
* capable IOCB types.
*
* @sp: SRB command to process
* @cmd_pkt: Command type 2 IOCB
* @tot_dsds: Total number of segments to transfer
*/
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
uint16_t tot_dsds)
{
uint16_t avail_dsds;
uint32_t *cur_dsd;
scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
struct scatterlist *sg;
int i;
cmd = sp->cmd;
/* Update entry type to indicate Command Type 2 IOCB */
*((uint32_t *)(&cmd_pkt->entry_type)) =
__constant_cpu_to_le32(COMMAND_TYPE);
/* No data transfer */
if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
return;
}
vha = sp->fcport->vha;
cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
/* Three DSDs are available in the Command Type 2 IOCB */
avail_dsds = 3;
cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
/* Load data segments */
scsi_for_each_sg(cmd, sg, tot_dsds, i) {
cont_entry_t *cont_pkt;
/* Allocate additional continuation packets? */
if (avail_dsds == 0) {
/*
* Seven DSDs are available in the Continuation
* Type 0 IOCB.
*/
cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
avail_dsds = 7;
}
*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
avail_dsds--;
}
}
/**
* qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64-bit
* capable IOCB types.
*
* @sp: SRB command to process
* @cmd_pkt: Command type 3 IOCB
* @tot_dsds: Total number of segments to transfer
*/
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
uint16_t tot_dsds)
{
uint16_t avail_dsds;
uint32_t *cur_dsd;
scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
struct scatterlist *sg;
int i;
cmd = sp->cmd;
/* Update entry type to indicate Command Type 3 IOCB */
*((uint32_t *)(&cmd_pkt->entry_type)) =
__constant_cpu_to_le32(COMMAND_A64_TYPE);
/* No data transfer */
if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
return;
}
vha = sp->fcport->vha;
cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
/* Two DSDs are available in the Command Type 3 IOCB */
avail_dsds = 2;
cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
/* Load data segments */
scsi_for_each_sg(cmd, sg, tot_dsds, i) {
dma_addr_t sle_dma;
cont_a64_entry_t *cont_pkt;
/* Allocate additional continuation packets? */
if (avail_dsds == 0) {
/*
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
sle_dma = sg_dma_address(sg);
*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
avail_dsds--;
}
}
/**
* qla2x00_start_scsi() - Send a SCSI command to the ISP
* @sp: command to send to the ISP
*
* Returns non-zero if a failure occurred, else zero.
*/
int
qla2x00_start_scsi(srb_t *sp)
{
int ret, nseg;
unsigned long flags;
scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
uint32_t *clr_ptr;
uint32_t index;
uint32_t handle;
cmd_entry_t *cmd_pkt;
uint16_t cnt;
uint16_t req_cnt;
uint16_t tot_dsds;
struct device_reg_2xxx __iomem *reg;
struct qla_hw_data *ha;
struct req_que *req;
struct rsp_que *rsp;
/* Setup device pointers. */
ret = 0;
vha = sp->fcport->vha;
ha = vha->hw;
reg = &ha->iobase->isp;
cmd = sp->cmd;
req = ha->req_q_map[0];
rsp = ha->rsp_q_map[0];
/* So we know we haven't pci_map'ed anything yet */
tot_dsds = 0;
/* Send marker if required */
if (vha->marker_needed != 0) {
if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
!= QLA_SUCCESS)
return (QLA_FUNCTION_FAILED);
vha->marker_needed = 0;
}
/* Acquire ring specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
handle++;
if (handle == MAX_OUTSTANDING_COMMANDS)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
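/* index reaches MAX_OUTSTANDING_COMMANDS only if no slot was free. */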
if (index == MAX_OUTSTANDING_COMMANDS)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
if (scsi_sg_count(cmd)) {
nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
scsi_sg_count(cmd), cmd->sc_data_direction);
if (unlikely(!nseg))
goto queuing_error;
} else
nseg = 0;
tot_dsds = nseg;
/* Calculate the number of request entries needed. */
req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
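/*
* req->cnt caches the number of free request-ring entries; refresh it
* from the hardware out-pointer only when the cached value looks too
* small. The extra two entries keep the in-pointer from fully catching
* up with the out-pointer, which would be indistinguishable from an
* empty ring.
*/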
if (req->cnt < (req_cnt + 2)) {
cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
req->cnt = req->length -
(req->ring_index - cnt);
}
if (req->cnt < (req_cnt + 2))
goto queuing_error;
/* Build command packet */
req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp;
sp->handle = handle;
sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
req->cnt -= req_cnt;
cmd_pkt = (cmd_entry_t *)req->ring_ptr;
cmd_pkt->handle = handle;
/* Zero out remaining portion of packet. */
clr_ptr = (uint32_t *)cmd_pkt + 2;
memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
/* Set target ID and LUN number */
SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);
/* Update tagged queuing modifier */
cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
/* Load SCSI command packet. */
memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
/* Build IOCB segments */
ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
/* Set total data segment count. */
cmd_pkt->entry_count = (uint8_t)req_cnt;
wmb();
/* Adjust ring index. */
req->ring_index++;
if (req->ring_index == req->length) {
req->ring_index = 0;
req->ring_ptr = req->ring;
} else
req->ring_ptr++;
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
/* Manage unprocessed RIO/ZIO commands in response queue. */
if (vha->flags.process_response_queue &&
rsp->ring_ptr->signature != RESPONSE_PROCESSED)
qla2x00_process_response_queue(rsp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return (QLA_SUCCESS);
queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return (QLA_FUNCTION_FAILED);
}
/**
* qla2x00_marker() - Send a marker IOCB to the firmware.
* @vha: HA context
* @req: request queue
* @rsp: response queue
* @loop_id: loop ID
* @lun: LUN
* @type: marker modifier
*
* Can be called from both normal and interrupt context.
*
* Returns non-zero if a failure occurred, else zero.
*/
int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
struct rsp_que *rsp, uint16_t loop_id,
uint16_t lun, uint8_t type)
{
mrk_entry_t *mrk;
struct mrk_entry_24xx *mrk24;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
mrk24 = NULL;
mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
if (mrk == NULL) {
DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
__func__, base_vha->host_no));
return (QLA_FUNCTION_FAILED);
}
mrk->entry_type = MARKER_TYPE;
mrk->modifier = type;
if (type != MK_SYNC_ALL) {
if (IS_FWI2_CAPABLE(ha)) {
mrk24 = (struct mrk_entry_24xx *) mrk;
mrk24->nport_handle = cpu_to_le16(loop_id);
mrk24->lun[1] = LSB(lun);
mrk24->lun[2] = MSB(lun);
host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
mrk24->vp_index = vha->vp_idx;
mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
} else {
SET_TARGET_ID(ha, mrk->target, loop_id);
mrk->lun = cpu_to_le16(lun);
}
}
wmb();
qla2x00_isp_cmd(vha, req);
return (QLA_SUCCESS);
}
int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
uint8_t type)
{
int ret;
unsigned long flags = 0;
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
return (ret);
}
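/*
* Typical usage (illustrative sketch, not a verbatim call site): after
* a LUN reset the driver can synchronize the firmware with
*
*	qla2x00_marker(vha, req, rsp, fcport->loop_id, lun,
*	    MK_SYNC_ID_LUN);
*
* whereas MK_SYNC_ALL (as issued from the qla2x00_start_scsi() path)
* ignores the loop_id/lun arguments entirely.
*/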
/**
* qla2x00_req_pkt() - Retrieve a request packet from the request ring.
* @vha: HA context
* @req: request queue
* @rsp: response queue
*
* Note: The caller must hold the hardware lock before calling this routine.
*
* Returns NULL if function failed, else, a pointer to the request packet.
*/
static request_t *
qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
struct rsp_que *rsp)
{
struct qla_hw_data *ha = vha->hw;
device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
request_t *pkt = NULL;
uint16_t cnt;
uint32_t *dword_ptr;
uint32_t timer;
uint16_t req_cnt = 1;
/* Poll briefly for a free slot: HZ iterations, 2us apart. */
for (timer = HZ; timer; timer--) {
if ((req_cnt + 2) >= req->cnt) {
/* Calculate number of free request entries. */
if (ha->mqenable)
cnt = (uint16_t)
RD_REG_DWORD(&reg->isp25mq.req_q_out);
else {
if (IS_QLA82XX(ha))
cnt = (uint16_t)RD_REG_DWORD(
&reg->isp82.req_q_out);
else if (IS_FWI2_CAPABLE(ha))
cnt = (uint16_t)RD_REG_DWORD(
&reg->isp24.req_q_out);
else
cnt = qla2x00_debounce_register(
ISP_REQ_Q_OUT(ha, &reg->isp));
}
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
req->cnt = req->length -
(req->ring_index - cnt);
}
/* If room for request in request ring. */
if ((req_cnt + 2) < req->cnt) {
req->cnt--;
pkt = req->ring_ptr;
/* Zero out packet. */
dword_ptr = (uint32_t *)pkt;
for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
*dword_ptr++ = 0;
/* Set entry count. */
pkt->entry_count = 1;
break;
}
/* Release ring specific lock */
spin_unlock_irq(&ha->hardware_lock);
udelay(2); /* 2 us */
/* Check for pending interrupts. */
/* During init the marker is issued directly, so skip polling. */
if (!vha->marker_needed && vha->flags.init_done)
qla2x00_poll(rsp);
spin_lock_irq(&ha->hardware_lock);
}
if (!pkt) {
DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
}
return (pkt);
}
/**
* qla2x00_isp_cmd() - Modify the request ring pointer.
* @vha: HA context
* @req: request queue
*
* Note: The caller must hold the hardware lock before calling this routine.
*/
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
struct qla_hw_data *ha = vha->hw;
device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
DEBUG5(printk("%s(): IOCB data:\n", __func__));
DEBUG5(qla2x00_dump_buffer(
(uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));
/* Adjust ring index. */
req->ring_index++;
if (req->ring_index == req->length) {
req->ring_index = 0;
req->ring_ptr = req->ring;
} else
req->ring_ptr++;
/* Set chip new ring index. */
if (IS_QLA82XX(ha)) {
uint32_t dbval = 0x04 | (ha->portnum << 5);
/* write, read and verify logic */
dbval = dbval | (req->id << 8) | (req->ring_index << 16);
if (ql2xdbwr)
qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
else {
WRT_REG_DWORD(
(unsigned long __iomem *)ha->nxdb_wr_ptr,
dbval);
wmb();
while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
WRT_REG_DWORD((unsigned long __iomem *)
ha->nxdb_wr_ptr, dbval);
wmb();
}
}
} else if (ha->mqenable) {
/* Set chip new ring index. */
WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
RD_REG_DWORD(&ioreg->hccr);
} else {
if (IS_FWI2_CAPABLE(ha)) {
WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
} else {
WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
req->ring_index);
RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
}
}
}
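/*
* Note: each ring-index write above is followed by a read of the same
* (or a nearby) register. The read flushes the posted PCI write so the
* ISP sees the new in-pointer promptly; the _RELAXED variants avoid the
* heavier ordering guarantees of a plain read.
*/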
/**
* qla24xx_calc_iocbs() - Determine number of Command Type 3 and
* Continuation Type 1 IOCBs to allocate.
*
* @dsds: number of data segment descriptors needed
*
* Returns the number of IOCB entries needed to store @dsds.
*/
inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
uint16_t iocbs;
iocbs = 1;
if (dsds > 1) {
iocbs += (dsds - 1) / 5;
if ((dsds - 1) % 5)
iocbs++;
}
return iocbs;
}
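/*
* Worked example (illustrative): qla24xx_calc_iocbs(7) == 3 -- one DSD
* in the Command Type 7 IOCB itself, then 5 + 1 DSDs across two
* Continuation Type 1 IOCBs.
*/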
/**
* qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
* IOCB types.
*
* @sp: SRB command to process
* @cmd_pkt: Command Type 7 IOCB
* @tot_dsds: Total number of segments to transfer
*/
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
uint16_t tot_dsds)
{
uint16_t avail_dsds;
uint32_t *cur_dsd;
scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
struct scatterlist *sg;
int i;
struct req_que *req;
cmd = sp->cmd;
/* Update entry type to indicate Command Type 7 IOCB */
*((uint32_t *)(&cmd_pkt->entry_type)) =
__constant_cpu_to_le32(COMMAND_TYPE_7);
/* No data transfer */
if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
return;
}
vha = sp->fcport->vha;
req = vha->req;
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_WRITE_DATA);
sp->fcport->vha->hw->qla_stats.output_bytes +=
scsi_bufflen(sp->cmd);
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_READ_DATA);
sp->fcport->vha->hw->qla_stats.input_bytes +=
scsi_bufflen(sp->cmd);
}
/* One DSD is available in the Command Type 7 IOCB */
avail_dsds = 1;
cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
/* Load data segments */
scsi_for_each_sg(cmd, sg, tot_dsds, i) {
dma_addr_t sle_dma;
cont_a64_entry_t *cont_pkt;
/* Allocate additional continuation packets? */
if (avail_dsds == 0) {
/*
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
sle_dma = sg_dma_address(sg);
*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
avail_dsds--;
}
}
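/*
* Capacity summary for the builders above: a Command Type 2 IOCB embeds
* 3 DSDs, Command Type 3 embeds 2 and Command Type 7 embeds 1; each
* Continuation Type 0 entry adds 7 and each Continuation Type 1 entry
* adds 5.
*/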
/**
* qla24xx_start_scsi() - Send a SCSI command to the ISP
* @sp: command to send to the ISP
*
* Returns non-zero if a failure occurred, else zero.
*/
int
qla24xx_start_scsi(srb_t *sp)
{
int ret, nseg;
unsigned long flags;
uint32_t *clr_ptr;
uint32_t index;
uint32_t handle;
struct cmd_type_7 *cmd_pkt;
uint16_t cnt;
uint16_t req_cnt;
uint16_t tot_dsds;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
struct scsi_cmnd *cmd = sp->cmd;
struct scsi_qla_host *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
/* Setup device pointers. */
ret = 0;
qla25xx_set_que(sp, &rsp);
req = vha->req;
/* So we know we haven't pci_map'ed anything yet */
tot_dsds = 0;
/* Send marker if required */
if (vha->marker_needed != 0) {
if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
!= QLA_SUCCESS)
return QLA_FUNCTION_FAILED;
vha->marker_needed = 0;
}
/* Acquire ring specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
handle++;
if (handle == MAX_OUTSTANDING_COMMANDS)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
if (index == MAX_OUTSTANDING_COMMANDS)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
if (scsi_sg_count(cmd)) {
nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
scsi_sg_count(cmd), cmd->sc_data_direction);
if (unlikely(!nseg))
goto queuing_error;
} else
nseg = 0;
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(tot_dsds);
if (req->cnt < (req_cnt + 2)) {
cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
req->cnt = req->length -
(req->ring_index - cnt);
}
if (req->cnt < (req_cnt + 2))
goto queuing_error;
/* Build command packet. */
req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp;
sp->handle = handle;
sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
req->cnt -= req_cnt;
cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
/* Zero out remaining portion of packet. */
/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
clr_ptr = (uint32_t *)cmd_pkt + 2;
memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
/* Set NPORT-ID and LUN number */
cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
cmd_pkt->vp_index = sp->fcport->vp_idx;
int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
/* Load SCSI command packet. */
memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
/* Build IOCB segments */
qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
/* Set total data segment count. */
cmd_pkt->entry_count = (uint8_t)req_cnt;
/* Specify response queue number where completion should happen */
cmd_pkt->entry_status = (uint8_t) rsp->id;
wmb();
/* Adjust ring index. */
req->ring_index++;
if (req->ring_index == req->length) {
req->ring_index = 0;
req->ring_ptr = req->ring;
} else
req->ring_ptr++;
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
WRT_REG_DWORD(req->req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
/* Manage unprocessed RIO/ZIO commands in response queue. */
if (vha->flags.process_response_queue &&
rsp->ring_ptr->signature != RESPONSE_PROCESSED)
qla24xx_process_response_queue(vha, rsp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_FUNCTION_FAILED;
}
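/*
* Select the response queue on which completions for this command
* should be posted: with CPU affinity enabled, requests are steered to
* the queue associated with the issuing CPU; everything else falls back
* to the base queue 0.
*/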
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
struct scsi_cmnd *cmd = sp->cmd;
struct qla_hw_data *ha = sp->fcport->vha->hw;
int affinity = cmd->request->cpu;
if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
affinity < ha->max_rsp_queues - 1)
*rsp = ha->rsp_q_map[affinity + 1];
else
*rsp = ha->rsp_q_map[0];
}
/* Generic Control-SRB manipulation functions. */
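/*
* Note: qla2x00_alloc_iocbs() and qla2x00_start_iocbs() assume the
* caller holds ha->hardware_lock; qla2x00_start_sp() below takes it
* around the allocate/build/start sequence.
*/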
static void *
qla2x00_alloc_iocbs(srb_t *sp)
{
scsi_qla_host_t *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
uint32_t index, handle;
request_t *pkt;
uint16_t cnt, req_cnt;
pkt = NULL;
req_cnt = 1;
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
handle++;
if (handle == MAX_OUTSTANDING_COMMANDS)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
if (index == MAX_OUTSTANDING_COMMANDS)
goto queuing_error;
/* Check for room on request queue. */
if (req->cnt < req_cnt) {
if (ha->mqenable)
cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
else if (IS_FWI2_CAPABLE(ha))
cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
else
cnt = qla2x00_debounce_register(
ISP_REQ_Q_OUT(ha, &reg->isp));
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
req->cnt = req->length -
(req->ring_index - cnt);
}
if (req->cnt < req_cnt)
goto queuing_error;
/* Prep packet */
req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp;
req->cnt -= req_cnt;
pkt = req->ring_ptr;
memset(pkt, 0, REQUEST_ENTRY_SIZE);
pkt->entry_count = req_cnt;
pkt->handle = handle;
sp->handle = handle;
queuing_error:
return pkt;
}
static void
qla2x00_start_iocbs(srb_t *sp)
{
struct qla_hw_data *ha = sp->fcport->vha->hw;
struct req_que *req = ha->req_q_map[0];
device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
if (IS_QLA82XX(ha)) {
qla82xx_start_iocbs(sp);
} else {
/* Adjust ring index. */
req->ring_index++;
if (req->ring_index == req->length) {
req->ring_index = 0;
req->ring_ptr = req->ring;
} else
req->ring_ptr++;
/* Set chip new ring index. */
if (ha->mqenable) {
WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
RD_REG_DWORD(&ioreg->hccr);
} else if (IS_FWI2_CAPABLE(ha)) {
WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
} else {
WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
req->ring_index);
RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
}
}
}
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
struct srb_ctx *ctx = sp->ctx;
struct srb_iocb *lio = ctx->u.iocb_cmd;
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
logio->port_id[2] = sp->fcport->d_id.b.domain;
logio->vp_index = sp->fcport->vp_idx;
}
static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
struct qla_hw_data *ha = sp->fcport->vha->hw;
struct srb_ctx *ctx = sp->ctx;
struct srb_iocb *lio = ctx->u.iocb_cmd;
uint16_t opts;
mbx->entry_type = MBX_IOCB_TYPE;
SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
if (HAS_EXTENDED_IDS(ha)) {
mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
mbx->mb10 = cpu_to_le16(opts);
} else {
mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
}
mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
sp->fcport->d_id.b.al_pa);
mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}
static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags =
cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
logio->port_id[2] = sp->fcport->d_id.b.domain;
logio->vp_index = sp->fcport->vp_idx;
}
static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
struct qla_hw_data *ha = sp->fcport->vha->hw;
mbx->entry_type = MBX_IOCB_TYPE;
SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
cpu_to_le16(sp->fcport->loop_id):
cpu_to_le16(sp->fcport->loop_id << 8);
mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
sp->fcport->d_id.b.al_pa);
mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
/* Implicit: mbx->mbx10 = 0. */
}
static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->vp_index = sp->fcport->vp_idx;
}
static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
struct qla_hw_data *ha = sp->fcport->vha->hw;
mbx->entry_type = MBX_IOCB_TYPE;
SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
if (HAS_EXTENDED_IDS(ha)) {
mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
mbx->mb10 = cpu_to_le16(BIT_0);
} else {
mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
}
mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}
static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
uint32_t flags;
unsigned int lun;
struct fc_port *fcport = sp->fcport;
scsi_qla_host_t *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct srb_ctx *ctx = sp->ctx;
struct srb_iocb *iocb = ctx->u.iocb_cmd;
struct req_que *req = vha->req;
flags = iocb->u.tmf.flags;
lun = iocb->u.tmf.lun;
tsk->entry_type = TSK_MGMT_IOCB_TYPE;
tsk->entry_count = 1;
tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
tsk->nport_handle = cpu_to_le16(fcport->loop_id);
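/* Timeout is twice R_A_TOV (r_a_tov appears to be in 100ms units). */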
tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
tsk->control_flags = cpu_to_le32(flags);
tsk->port_id[0] = fcport->d_id.b.al_pa;
tsk->port_id[1] = fcport->d_id.b.area;
tsk->port_id[2] = fcport->d_id.b.domain;
tsk->vp_index = fcport->vp_idx;
if (flags == TCF_LUN_RESET) {
int_to_scsilun(lun, &tsk->lun);
host_to_fcp_swap((uint8_t *)&tsk->lun,
sizeof(tsk->lun));
}
}
static void
qla24xx_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk)
{
uint16_t lun;
uint8_t modif;
struct fc_port *fcport = sp->fcport;
scsi_qla_host_t *vha = fcport->vha;
struct srb_ctx *ctx = sp->ctx;
struct srb_iocb *iocb = ctx->u.iocb_cmd;
struct req_que *req = vha->req;
lun = iocb->u.marker.lun;
modif = iocb->u.marker.modif;
mrk->entry_type = MARKER_TYPE;
mrk->modifier = modif;
if (modif != MK_SYNC_ALL) {
mrk->nport_handle = cpu_to_le16(fcport->loop_id);
mrk->lun[1] = LSB(lun);
mrk->lun[2] = MSB(lun);
host_to_fcp_swap(mrk->lun, sizeof(mrk->lun));
mrk->vp_index = vha->vp_idx;
mrk->handle = MAKE_HANDLE(req->id, mrk->handle);
}
}
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
els_iocb->entry_type = ELS_IOCB_TYPE;
els_iocb->entry_count = 1;
els_iocb->sys_define = 0;
els_iocb->entry_status = 0;
els_iocb->handle = sp->handle;
els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
els_iocb->vp_index = sp->fcport->vp_idx;
els_iocb->sof_type = EST_SOFI3;
els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
els_iocb->opcode =
(((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
bsg_job->request->rqst_data.r_els.els_code :
bsg_job->request->rqst_data.h_els.command_code;
els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
els_iocb->port_id[1] = sp->fcport->d_id.b.area;
els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
els_iocb->control_flags = 0;
els_iocb->rx_byte_count =
cpu_to_le32(bsg_job->reply_payload.payload_len);
els_iocb->tx_byte_count =
cpu_to_le32(bsg_job->request_payload.payload_len);
els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
(bsg_job->request_payload.sg_list)));
els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
(bsg_job->request_payload.sg_list)));
els_iocb->tx_len = cpu_to_le32(sg_dma_len
(bsg_job->request_payload.sg_list));
els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
(bsg_job->reply_payload.sg_list)));
els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
(bsg_job->reply_payload.sg_list)));
els_iocb->rx_len = cpu_to_le32(sg_dma_len
(bsg_job->reply_payload.sg_list));
}
static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
uint16_t avail_dsds;
uint32_t *cur_dsd;
struct scatterlist *sg;
int index;
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
int loop_iteration = 0;
int cont_iocb_prsnt = 0;
int entry_count = 1;
ct_iocb->entry_type = CT_IOCB_TYPE;
ct_iocb->entry_status = 0;
ct_iocb->sys_define = 0;
ct_iocb->handle = sp->handle;
ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
ct_iocb->vp_index = sp->fcport->vp_idx;
ct_iocb->comp_status = __constant_cpu_to_le16(0);
ct_iocb->cmd_dsd_count =
cpu_to_le16(bsg_job->request_payload.sg_cnt);
ct_iocb->timeout = 0;
ct_iocb->rsp_dsd_count =
cpu_to_le16(bsg_job->reply_payload.sg_cnt);
ct_iocb->rsp_byte_count =
cpu_to_le32(bsg_job->reply_payload.payload_len);
ct_iocb->cmd_byte_count =
cpu_to_le32(bsg_job->request_payload.payload_len);
ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
(bsg_job->request_payload.sg_list)));
ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
(bsg_job->request_payload.sg_list)));
ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
(bsg_job->request_payload.sg_list));
avail_dsds = 1;
cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
index = 0;
tot_dsds = bsg_job->reply_payload.sg_cnt;
for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
dma_addr_t sle_dma;
cont_a64_entry_t *cont_pkt;
/* Allocate additional continuation packets? */
if (avail_dsds == 0) {
/*
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
avail_dsds = 5;
cont_iocb_prsnt = 1;
entry_count++;
}
sle_dma = sg_dma_address(sg);
*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
loop_iteration++;
avail_dsds--;
}
ct_iocb->entry_count = entry_count;
}
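/*
* Illustrative sketch of the control-SRB flow (assuming a caller such
* as the async login/TM helpers has built an srb with a populated
* srb_ctx):
*
*	ctx->type = SRB_TM_CMD;
*	... fill in ctx->u.iocb_cmd->u.tmf ...
*	rval = qla2x00_start_sp(sp);
*
* qla2x00_start_sp() allocates a request entry under the hardware lock,
* dispatches to the type-specific IOCB builder, and rings the request
* queue.
*/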
int
qla2x00_start_sp(srb_t *sp)
{
int rval;
struct qla_hw_data *ha = sp->fcport->vha->hw;
void *pkt;
struct srb_ctx *ctx = sp->ctx;
unsigned long flags;
rval = QLA_FUNCTION_FAILED;
spin_lock_irqsave(&ha->hardware_lock, flags);
pkt = qla2x00_alloc_iocbs(sp);
if (!pkt)
goto done;
rval = QLA_SUCCESS;
switch (ctx->type) {
case SRB_LOGIN_CMD:
IS_FWI2_CAPABLE(ha) ?
qla24xx_login_iocb(sp, pkt) :
qla2x00_login_iocb(sp, pkt);
break;
case SRB_LOGOUT_CMD:
IS_FWI2_CAPABLE(ha) ?
qla24xx_logout_iocb(sp, pkt) :
qla2x00_logout_iocb(sp, pkt);
break;
case SRB_ELS_CMD_RPT:
case SRB_ELS_CMD_HST:
qla24xx_els_iocb(sp, pkt);
break;
case SRB_CT_CMD:
qla24xx_ct_iocb(sp, pkt);
break;
case SRB_ADISC_CMD:
IS_FWI2_CAPABLE(ha) ?
qla24xx_adisc_iocb(sp, pkt) :
qla2x00_adisc_iocb(sp, pkt);
break;
case SRB_TM_CMD:
qla24xx_tm_iocb(sp, pkt);
break;
case SRB_MARKER_CMD:
qla24xx_marker_iocb(sp, pkt);
break;
default:
break;
}
wmb();
qla2x00_start_iocbs(sp);
done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return rval;
}