Merge remote-tracking branch 'scsi-queue/drivers-for-3.16' into for-linus
commit b4c43993f4
@@ -1,4 +1,4 @@
Copyright (c) 2003-2013 QLogic Corporation
Copyright (c) 2003-2014 QLogic Corporation
QLogic Linux FC-FCoE Driver

This program includes a device driver for Linux 3.x.

MAINTAINERS

@@ -4366,7 +4366,7 @@ S: Supported
F: drivers/crypto/nx/

IBM Power 842 compression accelerator
M: Robert Jennings <rcj@linux.vnet.ibm.com>
M: Nathan Fontenot <nfont@linux.vnet.ibm.com>
S: Supported
F: drivers/crypto/nx/nx-842.c
F: include/linux/nx842.h

@@ -4382,12 +4382,18 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/ibm/ibmveth.*

IBM Power Virtual SCSI/FC Device Drivers
M: Robert Jennings <rcj@linux.vnet.ibm.com>
IBM Power Virtual SCSI Device Drivers
M: Nathan Fontenot <nfont@linux.vnet.ibm.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/ibmvscsi/
X: drivers/scsi/ibmvscsi/ibmvstgt.c
F: drivers/scsi/ibmvscsi/ibmvscsi*
F: drivers/scsi/ibmvscsi/viosrp.h

IBM Power Virtual FC Device Drivers
M: Brian King <brking@linux.vnet.ibm.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/ibmvscsi/ibmvfc*

IBM ServeRAID RAID DRIVER
P: Jack Hammer

@@ -6007,6 +6013,28 @@ M: Petr Vandrovec <petr@vandrovec.name>
S: Odd Fixes
F: fs/ncpfs/

NCR 5380 SCSI DRIVERS
M: Finn Thain <fthain@telegraphics.com.au>
M: Michael Schmitz <schmitzmic@gmail.com>
L: linux-scsi@vger.kernel.org
S: Maintained
F: Documentation/scsi/g_NCR5380.txt
F: drivers/scsi/NCR5380.*
F: drivers/scsi/arm/cumana_1.c
F: drivers/scsi/arm/oak.c
F: drivers/scsi/atari_NCR5380.c
F: drivers/scsi/atari_scsi.*
F: drivers/scsi/dmx3191d.c
F: drivers/scsi/dtc.*
F: drivers/scsi/g_NCR5380.*
F: drivers/scsi/g_NCR5380_mmio.c
F: drivers/scsi/mac_scsi.*
F: drivers/scsi/pas16.*
F: drivers/scsi/sun3_NCR5380.c
F: drivers/scsi/sun3_scsi.*
F: drivers/scsi/sun3_scsi_vme.c
F: drivers/scsi/t128.*

NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)
M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
L: linux-scsi@vger.kernel.org
@@ -1037,7 +1037,7 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
goto out;
/* signature to know if this mf is freed */
mf->u.frame.linkage.arg1 = cpu_to_le32(0xdeadbeaf);
list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
list_add(&mf->u.frame.linkage.list, &ioc->FreeQ);
#ifdef MFCNT
ioc->mfcnt--;
#endif

@@ -2432,9 +2432,9 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
int rc, cim_rev;
ToolboxIstwiReadWriteRequest_t *IstwiRWRequest;
MPT_FRAME_HDR *mf = NULL;
MPIHeader_t *mpi_hdr;
unsigned long timeleft;
int retval;
u32 msgcontext;

/* Reset long to int. Should affect IA64 and SPARC only
*/

@@ -2581,11 +2581,11 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
}

IstwiRWRequest = (ToolboxIstwiReadWriteRequest_t *)mf;
mpi_hdr = (MPIHeader_t *) mf;
msgcontext = IstwiRWRequest->MsgContext;
memset(IstwiRWRequest,0,sizeof(ToolboxIstwiReadWriteRequest_t));
IstwiRWRequest->MsgContext = msgcontext;
IstwiRWRequest->Function = MPI_FUNCTION_TOOLBOX;
IstwiRWRequest->Tool = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL;
IstwiRWRequest->MsgContext = mpi_hdr->MsgContext;
IstwiRWRequest->Flags = MPI_TB_ISTWI_FLAGS_READ;
IstwiRWRequest->NumAddressBytes = 0x01;
IstwiRWRequest->DataLength = cpu_to_le16(0x04);

@@ -649,7 +649,7 @@ mptfc_slave_alloc(struct scsi_device *sdev)
}

static int
mptfc_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
{
struct mptfc_rport_info *ri;
struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device));

@@ -658,14 +658,14 @@ mptfc_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))

if (!vdevice || !vdevice->vtarget) {
SCpnt->result = DID_NO_CONNECT << 16;
done(SCpnt);
SCpnt->scsi_done(SCpnt);
return 0;
}

err = fc_remote_port_chkready(rport);
if (unlikely(err)) {
SCpnt->result = err;
done(SCpnt);
SCpnt->scsi_done(SCpnt);
return 0;
}

@@ -673,15 +673,13 @@ mptfc_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
ri = *((struct mptfc_rport_info **)rport->dd_data);
if (unlikely(!ri)) {
SCpnt->result = DID_IMM_RETRY << 16;
done(SCpnt);
SCpnt->scsi_done(SCpnt);
return 0;
}

return mptscsih_qcmd(SCpnt,done);
return mptscsih_qcmd(SCpnt);
}

static DEF_SCSI_QCMD(mptfc_qcmd)

/*
* mptfc_display_port_link_speed - displaying link speed
* @ioc: Pointer to MPT_ADAPTER structure

@@ -1896,7 +1896,7 @@ mptsas_slave_alloc(struct scsi_device *sdev)
}

static int
mptsas_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
mptsas_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
{
MPT_SCSI_HOST *hd;
MPT_ADAPTER *ioc;

@@ -1904,11 +1904,11 @@ mptsas_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))

if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) {
SCpnt->result = DID_NO_CONNECT << 16;
done(SCpnt);
SCpnt->scsi_done(SCpnt);
return 0;
}

hd = shost_priv(SCpnt->device->host);
hd = shost_priv(shost);
ioc = hd->ioc;

if (ioc->sas_discovery_quiesce_io)

@@ -1917,11 +1917,9 @@ mptsas_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
if (ioc->debug_level & MPT_DEBUG_SCSI)
scsi_print_command(SCpnt);

return mptscsih_qcmd(SCpnt,done);
return mptscsih_qcmd(SCpnt);
}

static DEF_SCSI_QCMD(mptsas_qcmd)

/**
* mptsas_mptsas_eh_timed_out - resets the scsi_cmnd timeout
* if the device under question is currently in the

@@ -1304,7 +1304,6 @@ int mptscsih_show_info(struct seq_file *m, struct Scsi_Host *host)
/**
* mptscsih_qcmd - Primary Fusion MPT SCSI initiator IO start routine.
* @SCpnt: Pointer to scsi_cmnd structure
* @done: Pointer SCSI mid-layer IO completion function
*
* (linux scsi_host_template.queuecommand routine)
* This is the primary SCSI IO start routine. Create a MPI SCSIIORequest

@@ -1313,7 +1312,7 @@ int mptscsih_show_info(struct seq_file *m, struct Scsi_Host *host)
* Returns 0. (rtn value discarded by linux scsi mid-layer)
*/
int
mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
mptscsih_qcmd(struct scsi_cmnd *SCpnt)
{
MPT_SCSI_HOST *hd;
MPT_FRAME_HDR *mf;

@@ -1329,10 +1328,9 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))

hd = shost_priv(SCpnt->device->host);
ioc = hd->ioc;
SCpnt->scsi_done = done;

dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p, done()=%p\n",
ioc->name, SCpnt, done));
dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p\n",
ioc->name, SCpnt));

if (ioc->taskmgmt_quiesce_io)
return SCSI_MLQUEUE_HOST_BUSY;

@@ -113,7 +113,7 @@ extern int mptscsih_resume(struct pci_dev *pdev);
#endif
extern int mptscsih_show_info(struct seq_file *, struct Scsi_Host *);
extern const char * mptscsih_info(struct Scsi_Host *SChost);
extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *));
extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt);
extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel,
u8 id, int lun, int ctx2abort, ulong timeout);
extern void mptscsih_slave_destroy(struct scsi_device *device);

@@ -780,33 +780,31 @@ static int mptspi_slave_configure(struct scsi_device *sdev)
}

static int
mptspi_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
mptspi_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
{
struct _MPT_SCSI_HOST *hd = shost_priv(SCpnt->device->host);
struct _MPT_SCSI_HOST *hd = shost_priv(shost);
VirtDevice *vdevice = SCpnt->device->hostdata;
MPT_ADAPTER *ioc = hd->ioc;

if (!vdevice || !vdevice->vtarget) {
SCpnt->result = DID_NO_CONNECT << 16;
done(SCpnt);
SCpnt->scsi_done(SCpnt);
return 0;
}

if (SCpnt->device->channel == 1 &&
mptscsih_is_phys_disk(ioc, 0, SCpnt->device->id) == 0) {
SCpnt->result = DID_NO_CONNECT << 16;
done(SCpnt);
SCpnt->scsi_done(SCpnt);
return 0;
}

if (spi_dv_pending(scsi_target(SCpnt->device)))
ddvprintk(ioc, scsi_print_command(SCpnt));

return mptscsih_qcmd(SCpnt,done);
return mptscsih_qcmd(SCpnt);
}

static DEF_SCSI_QCMD(mptspi_qcmd)

static void mptspi_slave_destroy(struct scsi_device *sdev)
{
struct scsi_target *starget = scsi_target(sdev);
@@ -27,8 +27,6 @@
*/

/*
* $Log: NCR5380.c,v $

* Revision 1.10 1998/9/2 Alan Cox
* (alan@lxorguk.ukuu.org.uk)
* Fixed up the timer lockups reported so far. Things still suck. Looking

@@ -89,13 +87,6 @@
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>

#ifndef NDEBUG
#define NDEBUG 0
#endif
#ifndef NDEBUG_ABORT
#define NDEBUG_ABORT 0
#endif

#if (NDEBUG & NDEBUG_LISTS)
#define LIST(x,y) {printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); }
#define REMOVE(w,x,y,z) {printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); }
@ -1005,7 +996,7 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)
|
||||
LIST(cmd, tmp);
|
||||
tmp->host_scribble = (unsigned char *) cmd;
|
||||
}
|
||||
dprintk(NDEBUG_QUEUES, ("scsi%d : command added to %s of queue\n", instance->host_no, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"));
|
||||
dprintk(NDEBUG_QUEUES, "scsi%d : command added to %s of queue\n", instance->host_no, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");
|
||||
|
||||
/* Run the coroutine if it isn't already running. */
|
||||
/* Kick off command processing */
|
||||
@ -1040,7 +1031,7 @@ static void NCR5380_main(struct work_struct *work)
|
||||
/* Lock held here */
|
||||
done = 1;
|
||||
if (!hostdata->connected && !hostdata->selecting) {
|
||||
dprintk(NDEBUG_MAIN, ("scsi%d : not connected\n", instance->host_no));
|
||||
dprintk(NDEBUG_MAIN, "scsi%d : not connected\n", instance->host_no);
|
||||
/*
|
||||
* Search through the issue_queue for a command destined
|
||||
* for a target that's not busy.
|
||||
@ -1048,7 +1039,7 @@ static void NCR5380_main(struct work_struct *work)
|
||||
for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble)
|
||||
{
|
||||
if (prev != tmp)
|
||||
dprintk(NDEBUG_LISTS, ("MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->target, hostdata->busy[tmp->target], tmp->lun));
|
||||
dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun);
|
||||
/* When we find one, remove it from the issue queue. */
|
||||
if (!(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun))) {
|
||||
if (prev) {
|
||||
@ -1066,7 +1057,7 @@ static void NCR5380_main(struct work_struct *work)
|
||||
* On failure, we must add the command back to the
|
||||
* issue queue so we can keep trying.
|
||||
*/
|
||||
dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, ("scsi%d : main() : command for target %d lun %d removed from issue_queue\n", instance->host_no, tmp->target, tmp->lun));
|
||||
dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main() : command for target %d lun %d removed from issue_queue\n", instance->host_no, tmp->device->id, tmp->device->lun);
|
||||
|
||||
/*
|
||||
* A successful selection is defined as one that
|
||||
@ -1095,7 +1086,7 @@ static void NCR5380_main(struct work_struct *work)
|
||||
tmp->host_scribble = (unsigned char *) hostdata->issue_queue;
|
||||
hostdata->issue_queue = tmp;
|
||||
done = 0;
|
||||
dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, ("scsi%d : main(): select() failed, returned to issue_queue\n", instance->host_no));
|
||||
dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main(): select() failed, returned to issue_queue\n", instance->host_no);
|
||||
}
|
||||
/* lock held here still */
|
||||
} /* if target/lun is not busy */
|
||||
@ -1125,9 +1116,9 @@ static void NCR5380_main(struct work_struct *work)
|
||||
#endif
|
||||
&& (!hostdata->time_expires || time_before_eq(hostdata->time_expires, jiffies))
|
||||
) {
|
||||
dprintk(NDEBUG_MAIN, ("scsi%d : main() : performing information transfer\n", instance->host_no));
|
||||
dprintk(NDEBUG_MAIN, "scsi%d : main() : performing information transfer\n", instance->host_no);
|
||||
NCR5380_information_transfer(instance);
|
||||
dprintk(NDEBUG_MAIN, ("scsi%d : main() : done set false\n", instance->host_no));
|
||||
dprintk(NDEBUG_MAIN, "scsi%d : main() : done set false\n", instance->host_no);
|
||||
done = 0;
|
||||
} else
|
||||
break;
|
||||
@ -1159,8 +1150,8 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id)
|
||||
unsigned char basr;
|
||||
unsigned long flags;
|
||||
|
||||
dprintk(NDEBUG_INTR, ("scsi : NCR5380 irq %d triggered\n",
|
||||
instance->irq));
|
||||
dprintk(NDEBUG_INTR, "scsi : NCR5380 irq %d triggered\n",
|
||||
instance->irq);
|
||||
|
||||
do {
|
||||
done = 1;
|
||||
@ -1173,14 +1164,14 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id)
|
||||
NCR5380_dprint(NDEBUG_INTR, instance);
|
||||
if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
|
||||
done = 0;
|
||||
dprintk(NDEBUG_INTR, ("scsi%d : SEL interrupt\n", instance->host_no));
|
||||
dprintk(NDEBUG_INTR, "scsi%d : SEL interrupt\n", instance->host_no);
|
||||
NCR5380_reselect(instance);
|
||||
(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
|
||||
} else if (basr & BASR_PARITY_ERROR) {
|
||||
dprintk(NDEBUG_INTR, ("scsi%d : PARITY interrupt\n", instance->host_no));
|
||||
dprintk(NDEBUG_INTR, "scsi%d : PARITY interrupt\n", instance->host_no);
|
||||
(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
|
||||
} else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
|
||||
dprintk(NDEBUG_INTR, ("scsi%d : RESET interrupt\n", instance->host_no));
|
||||
dprintk(NDEBUG_INTR, "scsi%d : RESET interrupt\n", instance->host_no);
|
||||
(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
|
||||
} else {
|
||||
#if defined(REAL_DMA)
|
||||
@ -1210,7 +1201,7 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id)
|
||||
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
|
||||
}
|
||||
#else
|
||||
dprintk(NDEBUG_INTR, ("scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG)));
|
||||
dprintk(NDEBUG_INTR, "scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG));
|
||||
(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
|
||||
#endif
|
||||
}
|
||||
@ -1304,7 +1295,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
|
||||
hostdata->restart_select = 0;
|
||||
|
||||
NCR5380_dprint(NDEBUG_ARBITRATION, instance);
|
||||
dprintk(NDEBUG_ARBITRATION, ("scsi%d : starting arbitration, id = %d\n", instance->host_no, instance->this_id));
|
||||
dprintk(NDEBUG_ARBITRATION, "scsi%d : starting arbitration, id = %d\n", instance->host_no, instance->this_id);
|
||||
|
||||
/*
|
||||
* Set the phase bits to 0, otherwise the NCR5380 won't drive the
|
||||
@ -1333,7 +1324,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
|
||||
goto failed;
|
||||
}
|
||||
|
||||
dprintk(NDEBUG_ARBITRATION, ("scsi%d : arbitration complete\n", instance->host_no));
|
||||
dprintk(NDEBUG_ARBITRATION, "scsi%d : arbitration complete\n", instance->host_no);
|
||||
|
||||
/*
|
||||
* The arbitration delay is 2.2us, but this is a minimum and there is
|
||||
@ -1347,7 +1338,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
|
||||
/* Check for lost arbitration */
|
||||
if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) {
|
||||
NCR5380_write(MODE_REG, MR_BASE);
|
||||
dprintk(NDEBUG_ARBITRATION, ("scsi%d : lost arbitration, deasserting MR_ARBITRATE\n", instance->host_no));
|
||||
dprintk(NDEBUG_ARBITRATION, "scsi%d : lost arbitration, deasserting MR_ARBITRATE\n", instance->host_no);
|
||||
goto failed;
|
||||
}
|
||||
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL);
|
||||
@ -1360,7 +1351,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
|
||||
(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) {
|
||||
NCR5380_write(MODE_REG, MR_BASE);
|
||||
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
|
||||
dprintk(NDEBUG_ARBITRATION, ("scsi%d : lost arbitration, deasserting ICR_ASSERT_SEL\n", instance->host_no));
|
||||
dprintk(NDEBUG_ARBITRATION, "scsi%d : lost arbitration, deasserting ICR_ASSERT_SEL\n", instance->host_no);
|
||||
goto failed;
|
||||
}
|
||||
/*
|
||||
@ -1370,7 +1361,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
|
||||
|
||||
udelay(2);
|
||||
|
||||
dprintk(NDEBUG_ARBITRATION, ("scsi%d : won arbitration\n", instance->host_no));
|
||||
dprintk(NDEBUG_ARBITRATION, "scsi%d : won arbitration\n", instance->host_no);
|
||||
|
||||
/*
|
||||
* Now that we have won arbitration, start Selection process, asserting
|
||||
@ -1422,7 +1413,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
|
||||
|
||||
udelay(1);
|
||||
|
||||
dprintk(NDEBUG_SELECTION, ("scsi%d : selecting target %d\n", instance->host_no, scmd_id(cmd)));
|
||||
dprintk(NDEBUG_SELECTION, "scsi%d : selecting target %d\n", instance->host_no, scmd_id(cmd));
|
||||
|
||||
/*
|
||||
* The SCSI specification calls for a 250 ms timeout for the actual
|
||||
@ -1487,7 +1478,7 @@ part2:
|
||||
collect_stats(hostdata, cmd);
|
||||
cmd->scsi_done(cmd);
|
||||
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
|
||||
dprintk(NDEBUG_SELECTION, ("scsi%d : target did not respond within 250ms\n", instance->host_no));
|
||||
dprintk(NDEBUG_SELECTION, "scsi%d : target did not respond within 250ms\n", instance->host_no);
|
||||
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
|
||||
return 0;
|
||||
}
|
||||
@ -1520,7 +1511,7 @@ part2:
|
||||
goto failed;
|
||||
}
|
||||
|
||||
dprintk(NDEBUG_SELECTION, ("scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id));
|
||||
dprintk(NDEBUG_SELECTION, "scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id);
|
||||
tmp[0] = IDENTIFY(((instance->irq == SCSI_IRQ_NONE) ? 0 : 1), cmd->device->lun);
|
||||
|
||||
len = 1;
|
||||
@ -1530,7 +1521,7 @@ part2:
|
||||
data = tmp;
|
||||
phase = PHASE_MSGOUT;
|
||||
NCR5380_transfer_pio(instance, &phase, &len, &data);
|
||||
dprintk(NDEBUG_SELECTION, ("scsi%d : nexus established.\n", instance->host_no));
|
||||
dprintk(NDEBUG_SELECTION, "scsi%d : nexus established.\n", instance->host_no);
|
||||
/* XXX need to handle errors here */
|
||||
hostdata->connected = cmd;
|
||||
hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
|
||||
@ -1583,9 +1574,9 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase
|
||||
NCR5380_setup(instance);
|
||||
|
||||
if (!(p & SR_IO))
|
||||
dprintk(NDEBUG_PIO, ("scsi%d : pio write %d bytes\n", instance->host_no, c));
|
||||
dprintk(NDEBUG_PIO, "scsi%d : pio write %d bytes\n", instance->host_no, c);
|
||||
else
|
||||
dprintk(NDEBUG_PIO, ("scsi%d : pio read %d bytes\n", instance->host_no, c));
|
||||
dprintk(NDEBUG_PIO, "scsi%d : pio read %d bytes\n", instance->host_no, c);
|
||||
|
||||
/*
|
||||
* The NCR5380 chip will only drive the SCSI bus when the
|
||||
@ -1620,11 +1611,11 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase
|
||||
break;
|
||||
}
|
||||
|
||||
dprintk(NDEBUG_HANDSHAKE, ("scsi%d : REQ detected\n", instance->host_no));
|
||||
dprintk(NDEBUG_HANDSHAKE, "scsi%d : REQ detected\n", instance->host_no);
|
||||
|
||||
/* Check for phase mismatch */
|
||||
if ((tmp & PHASE_MASK) != p) {
|
||||
dprintk(NDEBUG_HANDSHAKE, ("scsi%d : phase mismatch\n", instance->host_no));
|
||||
dprintk(NDEBUG_HANDSHAKE, "scsi%d : phase mismatch\n", instance->host_no);
|
||||
NCR5380_dprint_phase(NDEBUG_HANDSHAKE, instance);
|
||||
break;
|
||||
}
|
||||
@ -1660,7 +1651,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase
|
||||
|
||||
/* FIXME - if this fails bus reset ?? */
|
||||
NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 5*HZ);
|
||||
dprintk(NDEBUG_HANDSHAKE, ("scsi%d : req false, handshake complete\n", instance->host_no));
|
||||
dprintk(NDEBUG_HANDSHAKE, "scsi%d : req false, handshake complete\n", instance->host_no);
|
||||
|
||||
/*
|
||||
* We have several special cases to consider during REQ/ACK handshaking :
|
||||
@ -1681,7 +1672,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase
|
||||
}
|
||||
} while (--c);
|
||||
|
||||
dprintk(NDEBUG_PIO, ("scsi%d : residual %d\n", instance->host_no, c));
|
||||
dprintk(NDEBUG_PIO, "scsi%d : residual %d\n", instance->host_no, c);
|
||||
|
||||
*count = c;
|
||||
*data = d;
|
||||
@ -1828,7 +1819,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
|
||||
c -= 2;
|
||||
}
|
||||
#endif
|
||||
dprintk(NDEBUG_DMA, ("scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n", instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d));
|
||||
dprintk(NDEBUG_DMA, "scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n", instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d);
|
||||
hostdata->dma_len = (p & SR_IO) ? NCR5380_dma_read_setup(instance, d, c) : NCR5380_dma_write_setup(instance, d, c);
|
||||
#endif
|
||||
|
||||
@ -1857,7 +1848,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
|
||||
NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE);
|
||||
#endif /* def REAL_DMA */
|
||||
|
||||
dprintk(NDEBUG_DMA, ("scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG)));
|
||||
dprintk(NDEBUG_DMA, "scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG));
|
||||
|
||||
/*
|
||||
* On the PAS16 at least I/O recovery delays are not needed here.
|
||||
@ -1934,7 +1925,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
|
||||
}
|
||||
}
|
||||
|
||||
dprintk(NDEBUG_DMA, ("scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n", instance->host_no, tmp, NCR5380_read(STATUS_REG)));
|
||||
dprintk(NDEBUG_DMA, "scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n", instance->host_no, tmp, NCR5380_read(STATUS_REG));
|
||||
|
||||
NCR5380_write(MODE_REG, MR_BASE);
|
||||
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
|
||||
@ -1948,7 +1939,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
|
||||
#ifdef READ_OVERRUNS
|
||||
if (*phase == p && (p & SR_IO) && residue == 0) {
|
||||
if (overrun) {
|
||||
dprintk(NDEBUG_DMA, ("Got an input overrun, using saved byte\n"));
|
||||
dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n");
|
||||
**data = saved_data;
|
||||
*data += 1;
|
||||
*count -= 1;
|
||||
@ -1957,13 +1948,13 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
|
||||
printk("No overrun??\n");
|
||||
cnt = toPIO = 2;
|
||||
}
|
||||
dprintk(NDEBUG_DMA, ("Doing %d-byte PIO to 0x%X\n", cnt, *data));
|
||||
dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%X\n", cnt, *data);
|
||||
NCR5380_transfer_pio(instance, phase, &cnt, data);
|
||||
*count -= toPIO - cnt;
|
||||
}
|
||||
#endif
|
||||
|
||||
dprintk(NDEBUG_DMA, ("Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count)));
|
||||
dprintk(NDEBUG_DMA, "Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count));
|
||||
return 0;
|
||||
|
||||
#elif defined(REAL_DMA)
|
||||
@ -2013,7 +2004,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
|
||||
foo = NCR5380_pwrite(instance, d, c);
|
||||
#else
|
||||
int timeout;
|
||||
dprintk(NDEBUG_C400_PWRITE, ("About to pwrite %d bytes\n", c));
|
||||
dprintk(NDEBUG_C400_PWRITE, "About to pwrite %d bytes\n", c);
|
||||
if (!(foo = NCR5380_pwrite(instance, d, c))) {
|
||||
/*
|
||||
* Wait for the last byte to be sent. If REQ is being asserted for
|
||||
@ -2024,19 +2015,19 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
|
||||
while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH));
|
||||
|
||||
if (!timeout)
|
||||
dprintk(NDEBUG_LAST_BYTE_SENT, ("scsi%d : timed out on last byte\n", instance->host_no));
|
||||
dprintk(NDEBUG_LAST_BYTE_SENT, "scsi%d : timed out on last byte\n", instance->host_no);
|
||||
|
||||
if (hostdata->flags & FLAG_CHECK_LAST_BYTE_SENT) {
|
||||
hostdata->flags &= ~FLAG_CHECK_LAST_BYTE_SENT;
|
||||
if (NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT) {
|
||||
hostdata->flags |= FLAG_HAS_LAST_BYTE_SENT;
|
||||
dprintk(NDEBUG_LAST_WRITE_SENT, ("scsi%d : last bit sent works\n", instance->host_no));
|
||||
dprintk(NDEBUG_LAST_BYTE_SENT, "scsi%d : last byte sent works\n", instance->host_no);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
dprintk(NDEBUG_C400_PWRITE, ("Waiting for LASTBYTE\n"));
|
||||
dprintk(NDEBUG_C400_PWRITE, "Waiting for LASTBYTE\n");
|
||||
while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT));
|
||||
dprintk(NDEBUG_C400_PWRITE, ("Got LASTBYTE\n"));
|
||||
dprintk(NDEBUG_C400_PWRITE, "Got LASTBYTE\n");
|
||||
}
|
||||
}
|
||||
#endif
|
||||
@ -2045,9 +2036,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
|
||||
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
|
||||
|
||||
if ((!(p & SR_IO)) && (hostdata->flags & FLAG_NCR53C400)) {
|
||||
dprintk(NDEBUG_C400_PWRITE, ("53C400w: Checking for IRQ\n"));
|
||||
dprintk(NDEBUG_C400_PWRITE, "53C400w: Checking for IRQ\n");
|
||||
if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_IRQ) {
|
||||
dprintk(NDEBUG_C400_PWRITE, ("53C400w: got it, reading reset interrupt reg\n"));
|
||||
dprintk(NDEBUG_C400_PWRITE, "53C400w: got it, reading reset interrupt reg\n");
|
||||
NCR5380_read(RESET_PARITY_INTERRUPT_REG);
|
||||
} else {
|
||||
printk("53C400w: IRQ NOT THERE!\n");
|
||||
@ -2139,7 +2130,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
|
||||
--cmd->SCp.buffers_residual;
|
||||
cmd->SCp.this_residual = cmd->SCp.buffer->length;
|
||||
cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
|
||||
dprintk(NDEBUG_INFORMATION, ("scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual));
|
||||
dprintk(NDEBUG_INFORMATION, "scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual);
|
||||
}
|
||||
/*
|
||||
* The preferred transfer method is going to be
|
||||
@ -2219,7 +2210,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
|
||||
case LINKED_FLG_CMD_COMPLETE:
|
||||
/* Accept message by clearing ACK */
|
||||
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
|
||||
dprintk(NDEBUG_LINKED, ("scsi%d : target %d lun %d linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun));
|
||||
dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %d linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun);
|
||||
/*
|
||||
* Sanity check : A linked command should only terminate with
|
||||
* one of these messages if there are more linked commands
|
||||
@ -2235,7 +2226,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
|
||||
/* The next command is still part of this process */
|
||||
cmd->next_link->tag = cmd->tag;
|
||||
cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
|
||||
dprintk(NDEBUG_LINKED, ("scsi%d : target %d lun %d linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun));
|
||||
dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %d linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun);
|
||||
collect_stats(hostdata, cmd);
|
||||
cmd->scsi_done(cmd);
|
||||
cmd = hostdata->connected;
|
||||
@ -2247,7 +2238,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
|
||||
sink = 1;
|
||||
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
|
||||
hostdata->connected = NULL;
|
||||
dprintk(NDEBUG_QUEUES, ("scsi%d : command for target %d, lun %d completed\n", instance->host_no, cmd->device->id, cmd->device->lun));
|
||||
dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d, lun %d completed\n", instance->host_no, cmd->device->id, cmd->device->lun);
|
||||
hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
|
||||
|
||||
/*
|
||||
@ -2281,13 +2272,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
|
||||
if ((cmd->cmnd[0] != REQUEST_SENSE) && (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
|
||||
scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
|
||||
|
||||
dprintk(NDEBUG_AUTOSENSE, ("scsi%d : performing request sense\n", instance->host_no));
|
||||
dprintk(NDEBUG_AUTOSENSE, "scsi%d : performing request sense\n", instance->host_no);
|
||||
|
||||
LIST(cmd, hostdata->issue_queue);
|
||||
cmd->host_scribble = (unsigned char *)
|
||||
hostdata->issue_queue;
|
||||
hostdata->issue_queue = (Scsi_Cmnd *) cmd;
|
||||
dprintk(NDEBUG_QUEUES, ("scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no));
|
||||
dprintk(NDEBUG_QUEUES, "scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no);
|
||||
} else
|
||||
#endif /* def AUTOSENSE */
|
||||
{
|
||||
@ -2327,7 +2318,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
|
||||
hostdata->disconnected_queue;
|
||||
hostdata->connected = NULL;
|
||||
hostdata->disconnected_queue = cmd;
|
||||
dprintk(NDEBUG_QUEUES, ("scsi%d : command for target %d lun %d was moved from connected to" " the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun));
|
||||
dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d lun %d was moved from connected to" " the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun);
|
||||
/*
|
||||
* Restore phase bits to 0 so an interrupted selection,
|
||||
* arbitration can resume.
|
||||
@ -2373,14 +2364,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
|
||||
extended_msg[0] = EXTENDED_MESSAGE;
|
||||
/* Accept first byte by clearing ACK */
|
||||
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
|
||||
dprintk(NDEBUG_EXTENDED, ("scsi%d : receiving extended message\n", instance->host_no));
|
||||
dprintk(NDEBUG_EXTENDED, "scsi%d : receiving extended message\n", instance->host_no);
|
||||
|
||||
len = 2;
|
||||
data = extended_msg + 1;
|
||||
phase = PHASE_MSGIN;
|
||||
NCR5380_transfer_pio(instance, &phase, &len, &data);
|
||||
|
||||
dprintk(NDEBUG_EXTENDED, ("scsi%d : length=%d, code=0x%02x\n", instance->host_no, (int) extended_msg[1], (int) extended_msg[2]));
|
||||
dprintk(NDEBUG_EXTENDED, "scsi%d : length=%d, code=0x%02x\n", instance->host_no, (int) extended_msg[1], (int) extended_msg[2]);
|
||||
|
||||
if (!len && extended_msg[1] <= (sizeof(extended_msg) - 1)) {
|
||||
/* Accept third byte by clearing ACK */
|
||||
@ -2390,7 +2381,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
|
||||
phase = PHASE_MSGIN;
|
||||
|
||||
NCR5380_transfer_pio(instance, &phase, &len, &data);
|
||||
dprintk(NDEBUG_EXTENDED, ("scsi%d : message received, residual %d\n", instance->host_no, len));
|
||||
dprintk(NDEBUG_EXTENDED, "scsi%d : message received, residual %d\n", instance->host_no, len);
|
||||
|
||||
switch (extended_msg[2]) {
|
||||
case EXTENDED_SDTR:
|
||||
@ -2456,7 +2447,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
|
||||
NCR5380_transfer_pio(instance, &phase, &len, &data);
|
||||
if (!cmd->device->disconnect && should_disconnect(cmd->cmnd[0])) {
|
||||
NCR5380_set_timer(hostdata, USLEEP_SLEEP);
|
||||
dprintk(NDEBUG_USLEEP, ("scsi%d : issued command, sleeping until %ul\n", instance->host_no, hostdata->time_expires));
|
||||
dprintk(NDEBUG_USLEEP, "scsi%d : issued command, sleeping until %lu\n", instance->host_no, hostdata->time_expires);
|
||||
return;
|
||||
}
|
||||
break;
|
||||
@ -2468,7 +2459,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
|
||||
break;
|
||||
default:
|
||||
printk("scsi%d : unknown phase\n", instance->host_no);
|
||||
NCR5380_dprint(NDEBUG_ALL, instance);
|
||||
NCR5380_dprint(NDEBUG_ANY, instance);
|
||||
} /* switch(phase) */
|
||||
} /* if (tmp * SR_REQ) */
|
||||
else {
|
||||
@ -2476,7 +2467,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
|
||||
*/
|
||||
if (!cmd->device->disconnect && time_after_eq(jiffies, poll_time)) {
|
||||
NCR5380_set_timer(hostdata, USLEEP_SLEEP);
|
||||
dprintk(NDEBUG_USLEEP, ("scsi%d : poll timed out, sleeping until %ul\n", instance->host_no, hostdata->time_expires));
|
||||
dprintk(NDEBUG_USLEEP, "scsi%d : poll timed out, sleeping until %lu\n", instance->host_no, hostdata->time_expires);
|
||||
return;
|
||||
}
|
||||
}
|
||||
@ -2517,7 +2508,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) {
|
||||
hostdata->restart_select = 1;
|
||||
|
||||
target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
|
||||
dprintk(NDEBUG_SELECTION, ("scsi%d : reselect\n", instance->host_no));
|
||||
dprintk(NDEBUG_SELECTION, "scsi%d : reselect\n", instance->host_no);
|
||||
|
||||
/*
|
||||
* At this point, we have detected that our SCSI ID is on the bus,
|
||||
@ -2597,7 +2588,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) {
|
||||
do_abort(instance);
|
||||
} else {
|
||||
hostdata->connected = tmp;
|
||||
dprintk(NDEBUG_RESELECTION, ("scsi%d : nexus established, target = %d, lun = %d, tag = %d\n", instance->host_no, tmp->target, tmp->lun, tmp->tag));
|
||||
dprintk(NDEBUG_RESELECTION, "scsi%d : nexus established, target = %d, lun = %d, tag = %d\n", instance->host_no, tmp->device->id, tmp->device->lun, tmp->tag);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2682,8 +2673,8 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
|
||||
|
||||
NCR5380_setup(instance);
|
||||
|
||||
dprintk(NDEBUG_ABORT, ("scsi%d : abort called\n", instance->host_no));
|
||||
dprintk(NDEBUG_ABORT, (" basr 0x%X, sr 0x%X\n", NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG)));
|
||||
dprintk(NDEBUG_ABORT, "scsi%d : abort called\n", instance->host_no);
|
||||
dprintk(NDEBUG_ABORT, " basr 0x%X, sr 0x%X\n", NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG));
|
||||
|
||||
#if 0
|
||||
/*
|
||||
@ -2693,7 +2684,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
|
||||
*/
|
||||
|
||||
if (hostdata->connected == cmd) {
|
||||
dprintk(NDEBUG_ABORT, ("scsi%d : aborting connected command\n", instance->host_no));
|
||||
dprintk(NDEBUG_ABORT, "scsi%d : aborting connected command\n", instance->host_no);
|
||||
hostdata->aborted = 1;
|
||||
/*
|
||||
* We should perform BSY checking, and make sure we haven't slipped
|
||||
@ -2721,14 +2712,14 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
|
||||
* from the issue queue.
|
||||
*/
|
||||
|
||||
dprintk(NDEBUG_ABORT, ("scsi%d : abort going into loop.\n", instance->host_no));
|
||||
dprintk(NDEBUG_ABORT, "scsi%d : abort going into loop.\n", instance->host_no);
|
||||
for (prev = (Scsi_Cmnd **) & (hostdata->issue_queue), tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp; prev = (Scsi_Cmnd **) & (tmp->host_scribble), tmp = (Scsi_Cmnd *) tmp->host_scribble)
|
||||
if (cmd == tmp) {
|
||||
REMOVE(5, *prev, tmp, tmp->host_scribble);
|
||||
(*prev) = (Scsi_Cmnd *) tmp->host_scribble;
|
||||
tmp->host_scribble = NULL;
|
||||
tmp->result = DID_ABORT << 16;
|
||||
dprintk(NDEBUG_ABORT, ("scsi%d : abort removed command from issue queue.\n", instance->host_no));
|
||||
dprintk(NDEBUG_ABORT, "scsi%d : abort removed command from issue queue.\n", instance->host_no);
|
||||
tmp->scsi_done(tmp);
|
||||
return SUCCESS;
|
||||
}
|
||||
@ -2750,7 +2741,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
|
||||
*/
|
||||
|
||||
if (hostdata->connected) {
|
||||
dprintk(NDEBUG_ABORT, ("scsi%d : abort failed, command connected.\n", instance->host_no));
|
||||
dprintk(NDEBUG_ABORT, "scsi%d : abort failed, command connected.\n", instance->host_no);
|
||||
return FAILED;
|
||||
}
|
||||
/*
|
||||
@ -2780,11 +2771,11 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
|
||||
|
||||
for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble)
|
||||
if (cmd == tmp) {
|
||||
dprintk(NDEBUG_ABORT, ("scsi%d : aborting disconnected command.\n", instance->host_no));
|
||||
dprintk(NDEBUG_ABORT, "scsi%d : aborting disconnected command.\n", instance->host_no);
|
||||
|
||||
if (NCR5380_select(instance, cmd, (int) cmd->tag))
|
||||
return FAILED;
|
||||
dprintk(NDEBUG_ABORT, ("scsi%d : nexus reestablished.\n", instance->host_no));
|
||||
dprintk(NDEBUG_ABORT, "scsi%d : nexus reestablished.\n", instance->host_no);
|
||||
|
||||
do_abort(instance);
|
||||
|
||||
|
@@ -21,10 +21,6 @@
* 1+ (800) 334-5454
*/

/*
* $Log: NCR5380.h,v $
*/

#ifndef NCR5380_H
#define NCR5380_H

@@ -60,6 +56,9 @@
#define NDEBUG_C400_PREAD 0x100000
#define NDEBUG_C400_PWRITE 0x200000
#define NDEBUG_LISTS 0x400000
#define NDEBUG_ABORT 0x800000
#define NDEBUG_TAGS 0x1000000
#define NDEBUG_MERGING 0x2000000

#define NDEBUG_ANY 0xFFFFFFFFUL

@@ -292,9 +291,24 @@ struct NCR5380_hostdata {

#ifdef __KERNEL__

#define dprintk(a,b) do {} while(0)
#define NCR5380_dprint(a,b) do {} while(0)
#define NCR5380_dprint_phase(a,b) do {} while(0)
#ifndef NDEBUG
#define NDEBUG (0)
#endif

#define dprintk(flg, fmt, ...) \
do { if ((NDEBUG) & (flg)) pr_debug(fmt, ## __VA_ARGS__); } while (0)

#if NDEBUG
#define NCR5380_dprint(flg, arg) \
do { if ((NDEBUG) & (flg)) NCR5380_print(arg); } while (0)
#define NCR5380_dprint_phase(flg, arg) \
do { if ((NDEBUG) & (flg)) NCR5380_print_phase(arg); } while (0)
static void NCR5380_print_phase(struct Scsi_Host *instance);
static void NCR5380_print(struct Scsi_Host *instance);
#else
#define NCR5380_dprint(flg, arg) do {} while (0)
#define NCR5380_dprint_phase(flg, arg) do {} while (0)
#endif

#if defined(AUTOPROBE_IRQ)
static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible);

@@ -307,10 +321,6 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id);
#endif
static void NCR5380_main(struct work_struct *work);
static void __maybe_unused NCR5380_print_options(struct Scsi_Host *instance);
#ifdef NDEBUG
static void NCR5380_print_phase(struct Scsi_Host *instance);
static void NCR5380_print(struct Scsi_Host *instance);
#endif
static int NCR5380_abort(Scsi_Cmnd * cmd);
static int NCR5380_bus_reset(Scsi_Cmnd * cmd);
static int NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
@@ -827,7 +827,7 @@ ahd_pci_intr(struct ahd_softc *ahd)
for (bit = 0; bit < 8; bit++) {

if ((pci_status[i] & (0x1 << bit)) != 0) {
static const char *s;
const char *s;

s = pci_status_strings[bit];
if (i == 7/*TARG*/ && bit == 3)

@@ -887,23 +887,15 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat)

for (bit = 0; bit < 8; bit++) {

if ((split_status[i] & (0x1 << bit)) != 0) {
static const char *s;

s = split_status_strings[bit];
printk(s, ahd_name(ahd),
if ((split_status[i] & (0x1 << bit)) != 0)
printk(split_status_strings[bit], ahd_name(ahd),
split_status_source[i]);
}

if (i > 1)
continue;

if ((sg_split_status[i] & (0x1 << bit)) != 0) {
static const char *s;

s = split_status_strings[bit];
printk(s, ahd_name(ahd), "SG");
}
if ((sg_split_status[i] & (0x1 << bit)) != 0)
printk(split_status_strings[bit], ahd_name(ahd), "SG");
}
}
/*
@ -61,13 +61,6 @@
|
||||
* comment out the undef.
|
||||
*/
|
||||
#undef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
|
||||
/*
|
||||
* SCSI-II Linked command support.
|
||||
*
|
||||
* The higher level code doesn't support linked commands yet, and so the option
|
||||
* is undef'd here.
|
||||
*/
|
||||
#undef CONFIG_SCSI_ACORNSCSI_LINK
|
||||
/*
|
||||
* SCSI-II Synchronous transfer support.
|
||||
*
|
||||
@ -160,10 +153,6 @@
|
||||
#error "Yippee! ABORT TAG is now defined! Remove this error!"
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SCSI_ACORNSCSI_LINK
|
||||
#error SCSI2 LINKed commands not supported (yet)!
|
||||
#endif
|
||||
|
||||
#ifdef USE_DMAC
|
||||
/*
|
||||
* DMAC setup parameters
|
||||
@ -1668,42 +1657,6 @@ void acornscsi_message(AS_Host *host)
|
||||
}
|
||||
break;
|
||||
|
||||
#ifdef CONFIG_SCSI_ACORNSCSI_LINK
|
||||
case LINKED_CMD_COMPLETE:
|
||||
case LINKED_FLG_CMD_COMPLETE:
|
||||
/*
|
||||
* We don't support linked commands yet
|
||||
*/
|
||||
if (0) {
|
||||
#if (DEBUG & DEBUG_LINK)
|
||||
printk("scsi%d.%c: lun %d tag %d linked command complete\n",
|
||||
host->host->host_no, acornscsi_target(host), host->SCpnt->tag);
|
||||
#endif
|
||||
/*
|
||||
* A linked command should only terminate with one of these messages
|
||||
* if there are more linked commands available.
|
||||
*/
|
||||
if (!host->SCpnt->next_link) {
|
||||
printk(KERN_WARNING "scsi%d.%c: lun %d tag %d linked command complete, but no next_link\n",
|
||||
instance->host_no, acornscsi_target(host), host->SCpnt->tag);
|
||||
acornscsi_sbic_issuecmd(host, CMND_ASSERTATN);
|
||||
msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
|
||||
} else {
|
||||
struct scsi_cmnd *SCpnt = host->SCpnt;
|
||||
|
||||
acornscsi_dma_cleanup(host);
|
||||
|
||||
host->SCpnt = host->SCpnt->next_link;
|
||||
host->SCpnt->tag = SCpnt->tag;
|
||||
SCpnt->result = DID_OK | host->scsi.SCp.Message << 8 | host->Scsi.SCp.Status;
|
||||
SCpnt->done(SCpnt);
|
||||
|
||||
/* initialise host->SCpnt->SCp */
|
||||
}
|
||||
break;
|
||||
}
|
||||
#endif
|
||||
|
||||
default: /* reject message */
|
||||
printk(KERN_ERR "scsi%d.%c: unrecognised message %02X, rejecting\n",
|
||||
host->host->host_no, acornscsi_target(host),
|
||||
@ -2825,9 +2778,6 @@ char *acornscsi_info(struct Scsi_Host *host)
|
||||
#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
|
||||
" TAG"
|
||||
#endif
|
||||
#ifdef CONFIG_SCSI_ACORNSCSI_LINK
|
||||
" LINK"
|
||||
#endif
|
||||
#if (DEBUG & DEBUG_NO_WRITE)
|
||||
" NOWRITE (" __stringify(NO_WRITE) ")"
|
||||
#endif
|
||||
@ -2851,9 +2801,6 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance)
|
||||
#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
|
||||
" TAG"
|
||||
#endif
|
||||
#ifdef CONFIG_SCSI_ACORNSCSI_LINK
|
||||
" LINK"
|
||||
#endif
|
||||
#if (DEBUG & DEBUG_NO_WRITE)
|
||||
" NOWRITE (" __stringify(NO_WRITE) ")"
|
||||
#endif
|
||||
|
@@ -36,9 +36,6 @@
void __iomem *base; \
void __iomem *dma

#define BOARD_NORMAL 0
#define BOARD_NCR53C400 1

#include "../NCR5380.h"

void cumanascsi_setup(char *str, int *ints)

@@ -37,9 +37,6 @@
#define NCR5380_implementation_fields \
void __iomem *base

#define BOARD_NORMAL 0
#define BOARD_NCR53C400 1

#include "../NCR5380.h"

#undef START_DMA_INITIATOR_RECEIVE_REG
@ -370,7 +370,7 @@ static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged)
|
||||
return 0;
|
||||
if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >=
|
||||
TagAlloc[cmd->device->id][cmd->device->lun].queue_size) {
|
||||
TAG_PRINTK("scsi%d: target %d lun %d: no free tags\n",
|
||||
dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n",
|
||||
H_NO(cmd), cmd->device->id, cmd->device->lun);
|
||||
return 1;
|
||||
}
|
||||
@ -394,7 +394,7 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged)
|
||||
!setup_use_tagged_queuing || !cmd->device->tagged_supported) {
|
||||
cmd->tag = TAG_NONE;
|
||||
hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
|
||||
TAG_PRINTK("scsi%d: target %d lun %d now allocated by untagged "
|
||||
dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged "
|
||||
"command\n", H_NO(cmd), cmd->device->id, cmd->device->lun);
|
||||
} else {
|
||||
TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
|
||||
@ -402,7 +402,7 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged)
|
||||
cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS);
|
||||
set_bit(cmd->tag, ta->allocated);
|
||||
ta->nr_allocated++;
|
||||
TAG_PRINTK("scsi%d: using tag %d for target %d lun %d "
|
||||
dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d "
|
||||
"(now %d tags in use)\n",
|
||||
H_NO(cmd), cmd->tag, cmd->device->id,
|
||||
cmd->device->lun, ta->nr_allocated);
|
||||
@ -420,7 +420,7 @@ static void cmd_free_tag(Scsi_Cmnd *cmd)
|
||||
|
||||
if (cmd->tag == TAG_NONE) {
|
||||
hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
|
||||
TAG_PRINTK("scsi%d: target %d lun %d untagged cmd finished\n",
|
||||
dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n",
|
||||
H_NO(cmd), cmd->device->id, cmd->device->lun);
|
||||
} else if (cmd->tag >= MAX_TAGS) {
|
||||
printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n",
|
||||
@ -429,7 +429,7 @@ static void cmd_free_tag(Scsi_Cmnd *cmd)
|
||||
TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
|
||||
clear_bit(cmd->tag, ta->allocated);
|
||||
ta->nr_allocated--;
|
||||
TAG_PRINTK("scsi%d: freed tag %d for target %d lun %d\n",
|
||||
dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n",
|
||||
H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun);
|
||||
}
|
||||
}
|
||||
@ -478,7 +478,7 @@ static void merge_contiguous_buffers(Scsi_Cmnd *cmd)
|
||||
for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1;
|
||||
cmd->SCp.buffers_residual &&
|
||||
virt_to_phys(sg_virt(&cmd->SCp.buffer[1])) == endaddr;) {
|
||||
MER_PRINTK("VTOP(%p) == %08lx -> merging\n",
|
||||
dprintk(NDEBUG_MERGING, "VTOP(%p) == %08lx -> merging\n",
|
||||
page_address(sg_page(&cmd->SCp.buffer[1])), endaddr);
|
||||
#if (NDEBUG & NDEBUG_MERGING)
|
||||
++cnt;
|
||||
@ -490,7 +490,7 @@ static void merge_contiguous_buffers(Scsi_Cmnd *cmd)
|
||||
}
|
||||
#if (NDEBUG & NDEBUG_MERGING)
|
||||
if (oldlen != cmd->SCp.this_residual)
|
||||
MER_PRINTK("merged %d buffers from %p, new length %08x\n",
|
||||
dprintk(NDEBUG_MERGING, "merged %d buffers from %p, new length %08x\n",
|
||||
cnt, cmd->SCp.ptr, cmd->SCp.this_residual);
|
||||
#endif
|
||||
}
|
||||
@ -626,16 +626,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
|
||||
}
|
||||
}
|
||||
|
||||
#else /* !NDEBUG */
|
||||
|
||||
/* dummies... */
|
||||
static inline void NCR5380_print(struct Scsi_Host *instance)
|
||||
{
|
||||
};
|
||||
static inline void NCR5380_print_phase(struct Scsi_Host *instance)
|
||||
{
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
@ -676,7 +666,7 @@ static inline void NCR5380_all_init(void)
|
||||
{
|
||||
static int done = 0;
|
||||
if (!done) {
|
||||
INI_PRINTK("scsi : NCR5380_all_init()\n");
|
||||
dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n");
|
||||
done = 1;
|
||||
}
|
||||
}
|
||||
@ -739,8 +729,8 @@ static void NCR5380_print_status(struct Scsi_Host *instance)
|
||||
Scsi_Cmnd *ptr;
|
||||
unsigned long flags;
|
||||
|
||||
NCR_PRINT(NDEBUG_ANY);
|
||||
NCR_PRINT_PHASE(NDEBUG_ANY);
|
||||
NCR5380_dprint(NDEBUG_ANY, instance);
|
||||
NCR5380_dprint_phase(NDEBUG_ANY, instance);
|
||||
|
||||
hostdata = (struct NCR5380_hostdata *)instance->hostdata;
|
||||
|
||||
@ -984,7 +974,7 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
|
||||
}
|
||||
local_irq_restore(flags);
|
||||
|
||||
QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd),
|
||||
dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd),
|
||||
(cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");
|
||||
|
||||
/* If queue_command() is called from an interrupt (real one or bottom
|
||||
@ -1054,7 +1044,7 @@ static void NCR5380_main(struct work_struct *work)
|
||||
done = 1;
|
||||
|
||||
if (!hostdata->connected) {
|
||||
MAIN_PRINTK("scsi%d: not connected\n", HOSTNO);
|
||||
dprintk(NDEBUG_MAIN, "scsi%d: not connected\n", HOSTNO);
|
||||
/*
|
||||
* Search through the issue_queue for a command destined
|
||||
* for a target that's not busy.
|
||||
@ -1107,7 +1097,7 @@ static void NCR5380_main(struct work_struct *work)
|
||||
* On failure, we must add the command back to the
|
||||
* issue queue so we can keep trying.
|
||||
*/
|
||||
MAIN_PRINTK("scsi%d: main(): command for target %d "
|
||||
dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d "
|
||||
"lun %d removed from issue_queue\n",
|
||||
HOSTNO, tmp->device->id, tmp->device->lun);
|
||||
/*
|
||||
@ -1140,7 +1130,7 @@ static void NCR5380_main(struct work_struct *work)
|
||||
#endif
|
||||
falcon_dont_release--;
|
||||
local_irq_restore(flags);
|
||||
MAIN_PRINTK("scsi%d: main(): select() failed, "
|
||||
dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, "
|
||||
"returned to issue_queue\n", HOSTNO);
|
||||
if (hostdata->connected)
|
||||
break;
|
||||
@ -1155,10 +1145,10 @@ static void NCR5380_main(struct work_struct *work)
|
||||
#endif
|
||||
) {
|
||||
local_irq_restore(flags);
|
||||
MAIN_PRINTK("scsi%d: main: performing information transfer\n",
|
||||
dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n",
|
||||
HOSTNO);
|
||||
NCR5380_information_transfer(instance);
|
||||
MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO);
|
||||
dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO);
|
||||
done = 0;
|
||||
}
|
||||
} while (!done);
|
||||
@ -1204,12 +1194,12 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
|
||||
(BASR_PHASE_MATCH|BASR_ACK)) {
|
||||
saved_data = NCR5380_read(INPUT_DATA_REG);
|
||||
overrun = 1;
|
||||
DMA_PRINTK("scsi%d: read overrun handled\n", HOSTNO);
|
||||
dprintk(NDEBUG_DMA, "scsi%d: read overrun handled\n", HOSTNO);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
|
||||
dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
|
||||
HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),
|
||||
NCR5380_read(STATUS_REG));
|
||||
|
||||
@ -1229,13 +1219,13 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
|
||||
if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) {
|
||||
cnt = toPIO = atari_read_overruns;
|
||||
if (overrun) {
|
||||
DMA_PRINTK("Got an input overrun, using saved byte\n");
|
||||
dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n");
|
||||
*(*data)++ = saved_data;
|
||||
(*count)--;
|
||||
cnt--;
|
||||
toPIO--;
|
||||
}
|
||||
DMA_PRINTK("Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data);
|
||||
dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data);
|
||||
NCR5380_transfer_pio(instance, &p, &cnt, data);
|
||||
*count -= toPIO - cnt;
|
||||
}
|
||||
@ -1261,25 +1251,25 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
|
||||
int done = 1, handled = 0;
|
||||
unsigned char basr;
|
||||
|
||||
INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO);
|
||||
dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO);
|
||||
|
||||
/* Look for pending interrupts */
|
||||
basr = NCR5380_read(BUS_AND_STATUS_REG);
|
||||
INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr);
|
||||
dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr);
|
||||
/* dispatch to appropriate routine if found and done=0 */
|
||||
if (basr & BASR_IRQ) {
|
||||
NCR_PRINT(NDEBUG_INTR);
|
||||
NCR5380_dprint(NDEBUG_INTR, instance);
|
||||
if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) {
|
||||
done = 0;
|
||||
ENABLE_IRQ();
|
||||
INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO);
|
||||
dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO);
|
||||
NCR5380_reselect(instance);
|
||||
(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
|
||||
} else if (basr & BASR_PARITY_ERROR) {
|
||||
INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO);
|
||||
dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO);
|
||||
(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
|
||||
} else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
|
||||
INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO);
|
||||
dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO);
|
||||
(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
|
||||
} else {
|
||||
/*
|
||||
@ -1298,7 +1288,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
|
||||
((basr & BASR_END_DMA_TRANSFER) ||
|
||||
!(basr & BASR_PHASE_MATCH))) {
|
||||
|
||||
INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
|
||||
dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
|
||||
NCR5380_dma_complete( instance );
|
||||
done = 0;
|
||||
ENABLE_IRQ();
|
||||
@ -1323,7 +1313,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
|
||||
}
|
||||
|
||||
if (!done) {
|
||||
INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO);
|
||||
dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO);
|
||||
/* Put a call to NCR5380_main() on the queue... */
|
||||
queue_main();
|
||||
}
|
||||
@ -1396,8 +1386,8 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
|
||||
unsigned long flags;
|
||||
|
||||
hostdata->restart_select = 0;
|
||||
NCR_PRINT(NDEBUG_ARBITRATION);
|
||||
ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO,
|
||||
NCR5380_dprint(NDEBUG_ARBITRATION, instance);
|
||||
dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO,
|
||||
instance->this_id);
|
||||
|
||||
/*
|
||||
@ -1442,7 +1432,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
|
||||
;
|
||||
#endif
|
||||
|
||||
ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO);
|
||||
dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO);
|
||||
|
||||
if (hostdata->connected) {
|
||||
NCR5380_write(MODE_REG, MR_BASE);
|
||||
@ -1463,7 +1453,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
|
||||
(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
|
||||
hostdata->connected) {
|
||||
NCR5380_write(MODE_REG, MR_BASE);
|
||||
ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
|
||||
dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
|
||||
HOSTNO);
|
||||
return -1;
|
||||
}
|
||||
@ -1478,7 +1468,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
|
||||
hostdata->connected) {
|
||||
NCR5380_write(MODE_REG, MR_BASE);
|
||||
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
|
||||
ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
|
||||
dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
|
||||
HOSTNO);
|
||||
return -1;
|
||||
}
|
||||
@ -1501,7 +1491,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
|
||||
return -1;
|
||||
}
|
||||
|
||||
ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO);
|
||||
dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO);
|
||||
|
||||
/*
|
||||
* Now that we have won arbitration, start Selection process, asserting
|
||||
@ -1561,7 +1551,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
|
||||
|
||||
udelay(1);
|
||||
|
||||
SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);
|
||||
dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);
|
||||
|
||||
/*
|
||||
* The SCSI specification calls for a 250 ms timeout for the actual
|
||||
@ -1617,7 +1607,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
|
||||
printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO);
|
||||
if (hostdata->restart_select)
|
||||
printk(KERN_NOTICE "\trestart select\n");
|
||||
NCR_PRINT(NDEBUG_ANY);
|
||||
NCR5380_dprint(NDEBUG_ANY, instance);
|
||||
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
|
||||
return -1;
|
||||
}
|
||||
@ -1630,7 +1620,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
|
||||
#endif
|
||||
cmd->scsi_done(cmd);
|
||||
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
|
||||
SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO);
|
||||
dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO);
|
||||
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
|
||||
return 0;
|
||||
}
|
||||
@ -1656,7 +1646,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
|
||||
while (!(NCR5380_read(STATUS_REG) & SR_REQ))
|
||||
;
|
||||
|
||||
SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
|
||||
dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
|
||||
HOSTNO, cmd->device->id);
|
||||
tmp[0] = IDENTIFY(1, cmd->device->lun);
|
||||
|
||||
@ -1676,7 +1666,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
|
||||
data = tmp;
|
||||
phase = PHASE_MSGOUT;
|
||||
NCR5380_transfer_pio(instance, &phase, &len, &data);
|
||||
SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO);
|
||||
dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO);
|
||||
/* XXX need to handle errors here */
|
||||
hostdata->connected = cmd;
|
||||
#ifndef SUPPORT_TAGS
|
||||
@ -1737,12 +1727,12 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
|
||||
while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ))
|
||||
;
|
||||
|
||||
HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO);
|
||||
dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO);
|
||||
|
||||
/* Check for phase mismatch */
|
||||
if ((tmp & PHASE_MASK) != p) {
|
||||
PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO);
|
||||
NCR_PRINT_PHASE(NDEBUG_PIO);
|
||||
dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO);
|
||||
NCR5380_dprint_phase(NDEBUG_PIO, instance);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -1764,25 +1754,25 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
|
||||
if (!(p & SR_IO)) {
|
||||
if (!((p & SR_MSG) && c > 1)) {
|
||||
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
|
||||
NCR_PRINT(NDEBUG_PIO);
|
||||
NCR5380_dprint(NDEBUG_PIO, instance);
|
||||
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
|
||||
ICR_ASSERT_DATA | ICR_ASSERT_ACK);
|
||||
} else {
|
||||
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
|
||||
ICR_ASSERT_DATA | ICR_ASSERT_ATN);
|
||||
NCR_PRINT(NDEBUG_PIO);
|
||||
NCR5380_dprint(NDEBUG_PIO, instance);
|
||||
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
|
||||
ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
|
||||
}
|
||||
} else {
|
||||
NCR_PRINT(NDEBUG_PIO);
|
||||
NCR5380_dprint(NDEBUG_PIO, instance);
|
||||
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
|
||||
}
|
||||
|
||||
while (NCR5380_read(STATUS_REG) & SR_REQ)
|
||||
;
|
||||
|
||||
HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO);
|
||||
dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO);
|
||||
|
||||
/*
|
||||
* We have several special cases to consider during REQ/ACK handshaking :
|
||||
@ -1803,7 +1793,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
|
||||
}
|
||||
} while (--c);
|
||||
|
||||
PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c);
|
||||
dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c);
|
||||
|
||||
*count = c;
|
||||
*data = d;
|
||||
@ -1917,7 +1907,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
|
||||
if (atari_read_overruns && (p & SR_IO))
|
||||
c -= atari_read_overruns;
|
||||
|
||||
DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n",
|
||||
dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",
|
||||
HOSTNO, (p & SR_IO) ? "reading" : "writing",
|
||||
c, (p & SR_IO) ? "to" : "from", d);
|
||||
|
||||
@ -1997,7 +1987,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
|
||||
phase = (tmp & PHASE_MASK);
|
||||
if (phase != old_phase) {
|
||||
old_phase = phase;
|
||||
NCR_PRINT_PHASE(NDEBUG_INFORMATION);
|
||||
NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
|
||||
}
|
||||
|
||||
if (sink && (phase != PHASE_MSGOUT)) {
|
||||
@ -2039,7 +2029,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
|
||||
* they are at contiguous physical addresses.
|
||||
*/
|
||||
merge_contiguous_buffers(cmd);
|
||||
INF_PRINTK("scsi%d: %d bytes and %d buffers left\n",
|
||||
dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n",
|
||||
HOSTNO, cmd->SCp.this_residual,
|
||||
cmd->SCp.buffers_residual);
|
||||
}
|
||||
@ -2123,7 +2113,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
|
||||
/* Accept message by clearing ACK */
|
||||
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
|
||||
|
||||
LNK_PRINTK("scsi%d: target %d lun %d linked command "
|
||||
dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked command "
|
||||
"complete.\n", HOSTNO, cmd->device->id, cmd->device->lun);
|
||||
|
||||
/* Enable reselect interrupts */
|
||||
@ -2148,7 +2138,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
|
||||
* and don't free it! */
|
||||
cmd->next_link->tag = cmd->tag;
|
||||
cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
|
||||
LNK_PRINTK("scsi%d: target %d lun %d linked request "
|
||||
dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked request "
|
||||
"done, calling scsi_done().\n",
|
||||
HOSTNO, cmd->device->id, cmd->device->lun);
|
||||
#ifdef NCR5380_STATS
|
||||
@ -2165,7 +2155,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
|
||||
/* ++guenther: possible race with Falcon locking */
|
||||
falcon_dont_release++;
|
||||
hostdata->connected = NULL;
|
||||
QU_PRINTK("scsi%d: command for target %d, lun %d "
|
||||
dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %d "
|
||||
"completed\n", HOSTNO, cmd->device->id, cmd->device->lun);
|
||||
#ifdef SUPPORT_TAGS
|
||||
cmd_free_tag(cmd);
|
||||
@ -2179,7 +2169,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
|
||||
/* ++Andreas: the mid level code knows about
|
||||
QUEUE_FULL now. */
|
||||
TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
|
||||
TAG_PRINTK("scsi%d: target %d lun %d returned "
|
||||
dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d returned "
|
||||
"QUEUE_FULL after %d commands\n",
|
||||
HOSTNO, cmd->device->id, cmd->device->lun,
|
||||
ta->nr_allocated);
|
||||
@ -2224,14 +2214,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
|
||||
(status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
|
||||
scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
|
||||
|
||||
ASEN_PRINTK("scsi%d: performing request sense\n", HOSTNO);
|
||||
dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n", HOSTNO);
|
||||
|
||||
local_irq_save(flags);
|
||||
LIST(cmd,hostdata->issue_queue);
|
||||
SET_NEXT(cmd, hostdata->issue_queue);
|
||||
hostdata->issue_queue = (Scsi_Cmnd *) cmd;
|
||||
local_irq_restore(flags);
|
||||
QU_PRINTK("scsi%d: REQUEST SENSE added to head of "
|
||||
dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of "
|
||||
"issue queue\n", H_NO(cmd));
|
||||
} else
|
||||
#endif /* def AUTOSENSE */
|
||||
@ -2277,7 +2267,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
|
||||
cmd->device->tagged_supported = 0;
|
||||
hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
|
||||
cmd->tag = TAG_NONE;
|
||||
TAG_PRINTK("scsi%d: target %d lun %d rejected "
|
||||
dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d rejected "
|
||||
"QUEUE_TAG message; tagged queuing "
|
||||
"disabled\n",
|
||||
HOSTNO, cmd->device->id, cmd->device->lun);
|
||||
@ -2294,7 +2284,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
|
||||
hostdata->connected = NULL;
|
||||
hostdata->disconnected_queue = cmd;
|
||||
local_irq_restore(flags);
|
||||
QU_PRINTK("scsi%d: command for target %d lun %d was "
|
||||
dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %d was "
|
||||
"moved from connected to the "
|
||||
"disconnected_queue\n", HOSTNO,
|
||||
cmd->device->id, cmd->device->lun);
|
||||
@ -2344,13 +2334,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
|
||||
/* Accept first byte by clearing ACK */
|
||||
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
|
||||
|
||||
EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO);
|
||||
dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO);
|
||||
|
||||
len = 2;
|
||||
data = extended_msg + 1;
|
||||
phase = PHASE_MSGIN;
|
||||
NCR5380_transfer_pio(instance, &phase, &len, &data);
|
||||
EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO,
|
||||
dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO,
|
||||
(int)extended_msg[1], (int)extended_msg[2]);
|
||||
|
||||
if (!len && extended_msg[1] <=
|
||||
@ -2362,7 +2352,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
|
||||
phase = PHASE_MSGIN;
|
||||
|
||||
NCR5380_transfer_pio(instance, &phase, &len, &data);
|
||||
EXT_PRINTK("scsi%d: message received, residual %d\n",
|
||||
dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n",
|
||||
HOSTNO, len);
|
||||
|
||||
switch (extended_msg[2]) {
|
||||
@ -2451,7 +2441,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
|
||||
break;
|
||||
default:
|
||||
printk("scsi%d: unknown phase\n", HOSTNO);
|
||||
NCR_PRINT(NDEBUG_ANY);
|
||||
NCR5380_dprint(NDEBUG_ANY, instance);
|
||||
} /* switch(phase) */
|
||||
} /* if (tmp * SR_REQ) */
|
||||
} /* while (1) */
|
||||
@ -2493,7 +2483,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
|
||||
|
||||
target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
|
||||
|
||||
RSL_PRINTK("scsi%d: reselect\n", HOSTNO);
|
||||
dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO);
|
||||
|
||||
/*
|
||||
* At this point, we have detected that our SCSI ID is on the bus,
|
||||
@ -2544,7 +2534,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
|
||||
if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
|
||||
msg[1] == SIMPLE_QUEUE_TAG)
|
||||
tag = msg[2];
|
||||
TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at "
|
||||
dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at "
|
||||
"reselection\n", HOSTNO, target_mask, lun, tag);
|
||||
}
|
||||
#endif
|
||||
@ -2598,7 +2588,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
|
||||
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
|
||||
|
||||
hostdata->connected = tmp;
|
||||
RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
|
||||
dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
|
||||
HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag);
|
||||
falcon_dont_release--;
|
||||
}
|
||||
@ -2640,7 +2630,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
|
||||
printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_abort\n",
|
||||
HOSTNO);
|
||||
|
||||
ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
|
||||
dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
|
||||
NCR5380_read(BUS_AND_STATUS_REG),
|
||||
NCR5380_read(STATUS_REG));
|
||||
|
||||
@ -2653,7 +2643,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
|
||||
|
||||
if (hostdata->connected == cmd) {
|
||||
|
||||
ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO);
|
||||
dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO);
|
||||
/*
|
||||
* We should perform BSY checking, and make sure we haven't slipped
|
||||
* into BUS FREE.
|
||||
@ -2683,11 +2673,11 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
|
||||
local_irq_restore(flags);
|
||||
cmd->scsi_done(cmd);
|
||||
falcon_release_lock_if_possible(hostdata);
|
||||
return SCSI_ABORT_SUCCESS;
|
||||
return SUCCESS;
|
||||
} else {
|
||||
/* local_irq_restore(flags); */
|
||||
printk("scsi%d: abort of connected command failed!\n", HOSTNO);
|
||||
return SCSI_ABORT_ERROR;
|
||||
return FAILED;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
@ -2705,13 +2695,13 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
|
||||
SET_NEXT(tmp, NULL);
|
||||
tmp->result = DID_ABORT << 16;
|
||||
local_irq_restore(flags);
|
||||
ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n",
|
||||
dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n",
|
||||
HOSTNO);
|
||||
/* Tagged queuing note: no tag to free here, hasn't been assigned
|
||||
* yet... */
|
||||
tmp->scsi_done(tmp);
|
||||
falcon_release_lock_if_possible(hostdata);
|
||||
return SCSI_ABORT_SUCCESS;
|
||||
return SUCCESS;
|
||||
}
|
||||
}
|
||||
|
||||
@ -2728,8 +2718,8 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
|
||||
|
||||
if (hostdata->connected) {
|
||||
local_irq_restore(flags);
|
||||
ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO);
|
||||
return SCSI_ABORT_SNOOZE;
|
||||
dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO);
|
||||
return FAILED;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2761,12 +2751,12 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
|
||||
tmp = NEXT(tmp)) {
|
||||
if (cmd == tmp) {
|
||||
local_irq_restore(flags);
|
||||
ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO);
|
||||
dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO);
|
||||
|
||||
if (NCR5380_select(instance, cmd, (int)cmd->tag))
|
||||
return SCSI_ABORT_BUSY;
|
||||
return FAILED;
|
||||
|
||||
ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO);
|
||||
dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO);
|
||||
|
||||
do_abort(instance);
|
||||
|
||||
@ -2791,7 +2781,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
|
||||
local_irq_restore(flags);
|
||||
tmp->scsi_done(tmp);
|
||||
falcon_release_lock_if_possible(hostdata);
|
||||
return SCSI_ABORT_SUCCESS;
|
||||
return SUCCESS;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2816,7 +2806,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
|
||||
*/
|
||||
falcon_release_lock_if_possible(hostdata);
|
||||
|
||||
return SCSI_ABORT_NOT_RUNNING;
|
||||
return FAILED;
|
||||
}
|
||||
|
||||
|
||||
@ -2825,7 +2815,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
|
||||
*
|
||||
* Purpose : reset the SCSI bus.
|
||||
*
|
||||
* Returns : SCSI_RESET_WAKEUP
|
||||
* Returns : SUCCESS or FAILURE
|
||||
*
|
||||
*/
|
||||
|
||||
@ -2834,7 +2824,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
|
||||
SETUP_HOSTDATA(cmd->device->host);
|
||||
int i;
|
||||
unsigned long flags;
|
||||
#if 1
|
||||
#if defined(RESET_RUN_DONE)
|
||||
Scsi_Cmnd *connected, *disconnected_queue;
|
||||
#endif
|
||||
|
||||
@ -2859,7 +2849,14 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
|
||||
* through anymore ... */
|
||||
(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
|
||||
|
||||
#if 1 /* XXX Should now be done by midlevel code, but it's broken XXX */
|
||||
/* MSch 20140115 - looking at the generic NCR5380 driver, all of this
|
||||
* should go.
|
||||
* Catch-22: if we don't clear all queues, the SCSI driver lock will
|
||||
* not be reset by atari_scsi_reset()!
|
||||
*/
|
||||
|
||||
#if defined(RESET_RUN_DONE)
|
||||
/* XXX Should now be done by midlevel code, but it's broken XXX */
|
||||
/* XXX see below XXX */
|
||||
|
||||
/* MSch: old-style reset: actually abort all command processing here */
|
||||
@ -2890,7 +2887,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
|
||||
*/
|
||||
|
||||
if ((cmd = connected)) {
|
||||
ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd));
|
||||
dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
|
||||
cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
|
||||
cmd->scsi_done(cmd);
|
||||
}
|
||||
@ -2902,7 +2899,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
|
||||
cmd->scsi_done(cmd);
|
||||
}
|
||||
if (i > 0)
|
||||
ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i);
|
||||
dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i);
|
||||
|
||||
/* The Falcon lock should be released after a reset...
|
||||
*/
|
||||
@ -2915,7 +2912,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
|
||||
* the midlevel code that the reset was SUCCESSFUL, and there is no
|
||||
* need to 'wake up' the commands by a request_sense
|
||||
*/
|
||||
return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET;
|
||||
return SUCCESS;
|
||||
#else /* 1 */
|
||||
|
||||
/* MSch: new-style reset handling: let the mid-level do what it can */
|
||||
@ -2942,11 +2939,11 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
|
||||
*/
|
||||
|
||||
if (hostdata->issue_queue)
|
||||
ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
|
||||
dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
|
||||
if (hostdata->connected)
|
||||
ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd));
|
||||
dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
|
||||
if (hostdata->disconnected_queue)
|
||||
ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));
|
||||
dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));
|
||||
|
||||
local_irq_save(flags);
|
||||
hostdata->issue_queue = NULL;
|
||||
@ -2963,6 +2960,6 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
|
||||
local_irq_restore(flags);
|
||||
|
||||
/* we did no complete reset of all commands, so a wakeup is required */
|
||||
return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET;
|
||||
return SUCCESS;
|
||||
#endif /* 1 */
|
||||
}
|
||||
|
@ -67,12 +67,6 @@
|
||||
|
||||
#include <linux/module.h>
|
||||
|
||||
#define NDEBUG (0)
|
||||
|
||||
#define NDEBUG_ABORT 0x00100000
|
||||
#define NDEBUG_TAGS 0x00200000
|
||||
#define NDEBUG_MERGING 0x00400000
|
||||
|
||||
#define AUTOSENSE
|
||||
/* For the Atari version, use only polled IO or REAL_DMA */
|
||||
#define REAL_DMA
|
||||
@ -314,7 +308,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy)
|
||||
|
||||
dma_stat = tt_scsi_dma.dma_ctrl;
|
||||
|
||||
INT_PRINTK("scsi%d: NCR5380 interrupt, DMA status = %02x\n",
|
||||
dprintk(NDEBUG_INTR, "scsi%d: NCR5380 interrupt, DMA status = %02x\n",
|
||||
atari_scsi_host->host_no, dma_stat & 0xff);
|
||||
|
||||
/* Look if it was the DMA that has interrupted: First possibility
|
||||
@ -340,7 +334,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy)
|
||||
if ((dma_stat & 0x02) && !(dma_stat & 0x40)) {
|
||||
atari_dma_residual = HOSTDATA_DMALEN - (SCSI_DMA_READ_P(dma_addr) - atari_dma_startaddr);
|
||||
|
||||
DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n",
|
||||
dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n",
|
||||
atari_dma_residual);
|
||||
|
||||
if ((signed int)atari_dma_residual < 0)
|
||||
@ -371,7 +365,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy)
|
||||
* other command. These shouldn't disconnect anyway.
|
||||
*/
|
||||
if (atari_dma_residual & 0x1ff) {
|
||||
DMA_PRINTK("SCSI DMA: DMA bug corrected, "
|
||||
dprintk(NDEBUG_DMA, "SCSI DMA: DMA bug corrected, "
|
||||
"difference %ld bytes\n",
|
||||
512 - (atari_dma_residual & 0x1ff));
|
||||
atari_dma_residual = (atari_dma_residual + 511) & ~0x1ff;
|
||||
@ -438,7 +432,7 @@ static irqreturn_t scsi_falcon_intr(int irq, void *dummy)
|
||||
"ST-DMA fifo\n", transferred & 15);
|
||||
|
||||
atari_dma_residual = HOSTDATA_DMALEN - transferred;
|
||||
DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n",
|
||||
dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n",
|
||||
atari_dma_residual);
|
||||
} else
|
||||
atari_dma_residual = 0;
|
||||
@ -474,11 +468,11 @@ static void atari_scsi_fetch_restbytes(void)
|
||||
/* there are 'nr' bytes left for the last long address
|
||||
before the DMA pointer */
|
||||
phys_dst ^= nr;
|
||||
DMA_PRINTK("SCSI DMA: there are %d rest bytes for phys addr 0x%08lx",
|
||||
dprintk(NDEBUG_DMA, "SCSI DMA: there are %d rest bytes for phys addr 0x%08lx",
|
||||
nr, phys_dst);
|
||||
/* The content of the DMA pointer is a physical address! */
|
||||
dst = phys_to_virt(phys_dst);
|
||||
DMA_PRINTK(" = virt addr %p\n", dst);
|
||||
dprintk(NDEBUG_DMA, " = virt addr %p\n", dst);
|
||||
for (src = (char *)&tt_scsi_dma.dma_restdata; nr != 0; --nr)
|
||||
*dst++ = *src++;
|
||||
}
|
||||
@ -827,7 +821,7 @@ static int atari_scsi_bus_reset(Scsi_Cmnd *cmd)
|
||||
} else {
|
||||
atari_turnon_irq(IRQ_MFP_FSCSI);
|
||||
}
|
||||
if ((rv & SCSI_RESET_ACTION) == SCSI_RESET_SUCCESS)
|
||||
if (rv == SUCCESS)
|
||||
falcon_release_lock_if_possible(hostdata);
|
||||
|
||||
return rv;
|
||||
@ -883,7 +877,7 @@ static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
|
||||
{
|
||||
unsigned long addr = virt_to_phys(data);
|
||||
|
||||
DMA_PRINTK("scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, "
|
||||
dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, "
|
||||
"dir = %d\n", instance->host_no, data, addr, count, dir);
|
||||
|
||||
if (!IS_A_TT() && !STRAM_ADDR(addr)) {
|
||||
@ -1063,7 +1057,7 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
|
||||
possible_len = limit;
|
||||
|
||||
if (possible_len != wanted_len)
|
||||
DMA_PRINTK("Sorry, must cut DMA transfer size to %ld bytes "
|
||||
dprintk(NDEBUG_DMA, "Sorry, must cut DMA transfer size to %ld bytes "
|
||||
"instead of %ld\n", possible_len, wanted_len);
|
||||
|
||||
return possible_len;
|
||||
|
@ -54,125 +54,6 @@
|
||||
#define NCR5380_dma_xfer_len(i,cmd,phase) \
|
||||
atari_dma_xfer_len(cmd->SCp.this_residual,cmd,((phase) & SR_IO) ? 0 : 1)
|
||||
|
||||
/* former generic SCSI error handling stuff */
|
||||
|
||||
#define SCSI_ABORT_SNOOZE 0
|
||||
#define SCSI_ABORT_SUCCESS 1
|
||||
#define SCSI_ABORT_PENDING 2
|
||||
#define SCSI_ABORT_BUSY 3
|
||||
#define SCSI_ABORT_NOT_RUNNING 4
|
||||
#define SCSI_ABORT_ERROR 5
|
||||
|
||||
#define SCSI_RESET_SNOOZE 0
|
||||
#define SCSI_RESET_PUNT 1
|
||||
#define SCSI_RESET_SUCCESS 2
|
||||
#define SCSI_RESET_PENDING 3
|
||||
#define SCSI_RESET_WAKEUP 4
|
||||
#define SCSI_RESET_NOT_RUNNING 5
|
||||
#define SCSI_RESET_ERROR 6
|
||||
|
||||
#define SCSI_RESET_SYNCHRONOUS 0x01
|
||||
#define SCSI_RESET_ASYNCHRONOUS 0x02
|
||||
#define SCSI_RESET_SUGGEST_BUS_RESET 0x04
|
||||
#define SCSI_RESET_SUGGEST_HOST_RESET 0x08
|
||||
|
||||
#define SCSI_RESET_BUS_RESET 0x100
|
||||
#define SCSI_RESET_HOST_RESET 0x200
|
||||
#define SCSI_RESET_ACTION 0xff
|
||||
|
||||
/* Debugging printk definitions:
|
||||
*
|
||||
* ARB -> arbitration
|
||||
* ASEN -> auto-sense
|
||||
* DMA -> DMA
|
||||
* HSH -> PIO handshake
|
||||
* INF -> information transfer
|
||||
* INI -> initialization
|
||||
* INT -> interrupt
|
||||
* LNK -> linked commands
|
||||
* MAIN -> NCR5380_main() control flow
|
||||
* NDAT -> no data-out phase
|
||||
* NWR -> no write commands
|
||||
* PIO -> PIO transfers
|
||||
* PDMA -> pseudo DMA (unused on Atari)
|
||||
* QU -> queues
|
||||
* RSL -> reselections
|
||||
* SEL -> selections
|
||||
* USL -> usleep cpde (unused on Atari)
|
||||
* LBS -> last byte sent (unused on Atari)
|
||||
* RSS -> restarting of selections
|
||||
* EXT -> extended messages
|
||||
* ABRT -> aborting and resetting
|
||||
* TAG -> queue tag handling
|
||||
* MER -> merging of consec. buffers
|
||||
*
|
||||
*/
|
||||
|
||||
#define dprint(flg, format...) \
|
||||
({ \
|
||||
if (NDEBUG & (flg)) \
|
||||
printk(KERN_DEBUG format); \
|
||||
})
|
||||
|
||||
#define ARB_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_ARBITRATION, format , ## args)
|
||||
#define ASEN_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_AUTOSENSE, format , ## args)
|
||||
#define DMA_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_DMA, format , ## args)
|
||||
#define HSH_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_HANDSHAKE, format , ## args)
|
||||
#define INF_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_INFORMATION, format , ## args)
|
||||
#define INI_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_INIT, format , ## args)
|
||||
#define INT_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_INTR, format , ## args)
|
||||
#define LNK_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_LINKED, format , ## args)
|
||||
#define MAIN_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_MAIN, format , ## args)
|
||||
#define NDAT_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_NO_DATAOUT, format , ## args)
|
||||
#define NWR_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_NO_WRITE, format , ## args)
|
||||
#define PIO_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_PIO, format , ## args)
|
||||
#define PDMA_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_PSEUDO_DMA, format , ## args)
|
||||
#define QU_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_QUEUES, format , ## args)
|
||||
#define RSL_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_RESELECTION, format , ## args)
|
||||
#define SEL_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_SELECTION, format , ## args)
|
||||
#define USL_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_USLEEP, format , ## args)
|
||||
#define LBS_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_LAST_BYTE_SENT, format , ## args)
|
||||
#define RSS_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_RESTART_SELECT, format , ## args)
|
||||
#define EXT_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_EXTENDED, format , ## args)
|
||||
#define ABRT_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_ABORT, format , ## args)
|
||||
#define TAG_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_TAGS, format , ## args)
|
||||
#define MER_PRINTK(format, args...) \
|
||||
dprint(NDEBUG_MERGING, format , ## args)
|
||||
|
||||
/* conditional macros for NCR5380_print_{,phase,status} */
|
||||
|
||||
#define NCR_PRINT(mask) \
|
||||
((NDEBUG & (mask)) ? NCR5380_print(instance) : (void)0)
|
||||
|
||||
#define NCR_PRINT_PHASE(mask) \
|
||||
((NDEBUG & (mask)) ? NCR5380_print_phase(instance) : (void)0)
|
||||
|
||||
#define NCR_PRINT_STATUS(mask) \
|
||||
((NDEBUG & (mask)) ? NCR5380_print_status(instance) : (void)0)
|
||||
|
||||
|
||||
#endif /* ndef ASM */
|
||||
#endif /* ATARI_SCSI_H */
|
||||
|
||||
|
@ -83,9 +83,20 @@ static inline void queue_tail_inc(struct be_queue_info *q)
|
||||
|
||||
/*ISCSI */
|
||||
|
||||
struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
|
||||
bool enable;
|
||||
u32 min_eqd; /* in usecs */
|
||||
u32 max_eqd; /* in usecs */
|
||||
u32 prev_eqd; /* in usecs */
|
||||
u32 et_eqd; /* configured val when aic is off */
|
||||
ulong jiffs;
|
||||
u64 eq_prev; /* Used to calculate eqe */
|
||||
};
|
||||
|
||||
struct be_eq_obj {
|
||||
bool todo_mcc_cq;
|
||||
bool todo_cq;
|
||||
u32 cq_count;
|
||||
struct be_queue_info q;
|
||||
struct beiscsi_hba *phba;
|
||||
struct be_queue_info *cq;
|
||||
|
@ -71,6 +71,7 @@ struct be_mcc_wrb {
|
||||
#define BEISCSI_FW_MBX_TIMEOUT 100
|
||||
|
||||
/* MBOX Command VER */
|
||||
#define MBX_CMD_VER1 0x01
|
||||
#define MBX_CMD_VER2 0x02
|
||||
|
||||
struct be_mcc_compl {
|
||||
@ -271,6 +272,12 @@ struct be_cmd_resp_eq_create {
|
||||
u16 rsvd0; /* sword */
|
||||
} __packed;
|
||||
|
||||
struct be_set_eqd {
|
||||
u32 eq_id;
|
||||
u32 phase;
|
||||
u32 delay_multiplier;
|
||||
} __packed;
|
||||
|
||||
struct mgmt_chap_format {
|
||||
u32 flags;
|
||||
u8 intr_chap_name[256];
|
||||
@ -622,7 +629,7 @@ struct be_cmd_req_modify_eq_delay {
|
||||
u32 eq_id;
|
||||
u32 phase;
|
||||
u32 delay_multiplier;
|
||||
} delay[8];
|
||||
} delay[MAX_CPUS];
|
||||
} __packed;
|
||||
|
||||
/******************** Get MAC ADDR *******************/
|
||||
@ -708,6 +715,8 @@ unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba);
|
||||
|
||||
void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
|
||||
|
||||
int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *,
|
||||
int num);
|
||||
int beiscsi_mccq_compl(struct beiscsi_hba *phba,
|
||||
uint32_t tag, struct be_mcc_wrb **wrb,
|
||||
struct be_dma_mem *mbx_cmd_mem);
|
||||
@ -1005,6 +1014,26 @@ struct tcp_connect_and_offload_in {
|
||||
u8 rsvd0[3];
|
||||
} __packed;
|
||||
|
||||
struct tcp_connect_and_offload_in_v1 {
|
||||
struct be_cmd_req_hdr hdr;
|
||||
struct ip_addr_format ip_address;
|
||||
u16 tcp_port;
|
||||
u16 cid;
|
||||
u16 cq_id;
|
||||
u16 defq_id;
|
||||
struct phys_addr dataout_template_pa;
|
||||
u16 hdr_ring_id;
|
||||
u16 data_ring_id;
|
||||
u8 do_offload;
|
||||
u8 ifd_state;
|
||||
u8 rsvd0[2];
|
||||
u16 tcp_window_size;
|
||||
u8 tcp_window_scale_count;
|
||||
u8 rsvd1;
|
||||
u32 tcp_mss:24;
|
||||
u8 rsvd2;
|
||||
} __packed;
|
||||
|
||||
struct tcp_connect_and_offload_out {
|
||||
struct be_cmd_resp_hdr hdr;
|
||||
u32 connection_handle;
|
||||
|
@ -1106,7 +1106,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
|
||||
struct beiscsi_hba *phba = beiscsi_ep->phba;
|
||||
struct tcp_connect_and_offload_out *ptcpcnct_out;
|
||||
struct be_dma_mem nonemb_cmd;
|
||||
unsigned int tag;
|
||||
unsigned int tag, req_memsize;
|
||||
int ret = -ENOMEM;
|
||||
|
||||
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
|
||||
@ -1127,8 +1127,14 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
|
||||
(beiscsi_ep->ep_cid)] = ep;
|
||||
|
||||
beiscsi_ep->cid_vld = 0;
|
||||
|
||||
if (is_chip_be2_be3r(phba))
|
||||
req_memsize = sizeof(struct tcp_connect_and_offload_in);
|
||||
else
|
||||
req_memsize = sizeof(struct tcp_connect_and_offload_in_v1);
|
||||
|
||||
nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
|
||||
sizeof(struct tcp_connect_and_offload_in),
|
||||
req_memsize,
|
||||
&nonemb_cmd.dma);
|
||||
if (nonemb_cmd.va == NULL) {
|
||||
|
||||
@ -1139,7 +1145,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
|
||||
beiscsi_free_ep(beiscsi_ep);
|
||||
return -ENOMEM;
|
||||
}
|
||||
nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in);
|
||||
nonemb_cmd.size = req_memsize;
|
||||
memset(nonemb_cmd.va, 0, nonemb_cmd.size);
|
||||
tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
|
||||
if (tag <= 0) {
|
||||
|
@ -599,15 +599,7 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
|
||||
pci_set_drvdata(pcidev, phba);
|
||||
phba->interface_handle = 0xFFFFFFFF;
|
||||
|
||||
if (iscsi_host_add(shost, &phba->pcidev->dev))
|
||||
goto free_devices;
|
||||
|
||||
return phba;
|
||||
|
||||
free_devices:
|
||||
pci_dev_put(phba->pcidev);
|
||||
iscsi_host_free(phba->shost);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
|
||||
@ -2279,6 +2271,7 @@ static int be_iopoll(struct blk_iopoll *iop, int budget)
|
||||
|
||||
pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
|
||||
ret = beiscsi_process_cq(pbe_eq);
|
||||
pbe_eq->cq_count += ret;
|
||||
if (ret < budget) {
|
||||
phba = pbe_eq->phba;
|
||||
blk_iopoll_complete(iop);
|
||||
@ -3692,7 +3685,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
|
||||
struct hwi_controller *phwi_ctrlr;
|
||||
struct hwi_context_memory *phwi_context;
|
||||
struct hwi_async_pdu_context *pasync_ctx;
|
||||
int i, eq_num, ulp_num;
|
||||
int i, eq_for_mcc, ulp_num;
|
||||
|
||||
phwi_ctrlr = phba->phwi_ctrlr;
|
||||
phwi_context = phwi_ctrlr->phwi_ctxt;
|
||||
@ -3729,16 +3722,17 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
|
||||
if (q->created)
|
||||
beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
|
||||
}
|
||||
|
||||
be_mcc_queues_destroy(phba);
|
||||
if (phba->msix_enabled)
|
||||
eq_num = 1;
|
||||
eq_for_mcc = 1;
|
||||
else
|
||||
eq_num = 0;
|
||||
for (i = 0; i < (phba->num_cpus + eq_num); i++) {
|
||||
eq_for_mcc = 0;
|
||||
for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
|
||||
q = &phwi_context->be_eq[i].q;
|
||||
if (q->created)
|
||||
beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
|
||||
}
|
||||
be_mcc_queues_destroy(phba);
|
||||
be_cmd_fw_uninit(ctrl);
|
||||
}
|
||||
|
||||
@ -3833,9 +3827,9 @@ static int hwi_init_port(struct beiscsi_hba *phba)
|
||||
|
||||
phwi_ctrlr = phba->phwi_ctrlr;
|
||||
phwi_context = phwi_ctrlr->phwi_ctxt;
|
||||
phwi_context->max_eqd = 0;
|
||||
phwi_context->max_eqd = 128;
|
||||
phwi_context->min_eqd = 0;
|
||||
phwi_context->cur_eqd = 64;
|
||||
phwi_context->cur_eqd = 0;
|
||||
be_cmd_fw_initialize(&phba->ctrl);
|
||||
|
||||
status = beiscsi_create_eqs(phba, phwi_context);
|
||||
@ -5290,6 +5284,57 @@ static void beiscsi_msix_enable(struct beiscsi_hba *phba)
|
||||
return;
|
||||
}
|
||||
|
||||
static void be_eqd_update(struct beiscsi_hba *phba)
|
||||
{
|
||||
struct be_set_eqd set_eqd[MAX_CPUS];
|
||||
struct be_aic_obj *aic;
|
||||
struct be_eq_obj *pbe_eq;
|
||||
struct hwi_controller *phwi_ctrlr;
|
||||
struct hwi_context_memory *phwi_context;
|
||||
int eqd, i, num = 0;
|
||||
ulong now;
|
||||
u32 pps, delta;
|
||||
unsigned int tag;
|
||||
|
||||
phwi_ctrlr = phba->phwi_ctrlr;
|
||||
phwi_context = phwi_ctrlr->phwi_ctxt;
|
||||
|
||||
for (i = 0; i <= phba->num_cpus; i++) {
|
||||
aic = &phba->aic_obj[i];
|
||||
pbe_eq = &phwi_context->be_eq[i];
|
||||
now = jiffies;
|
||||
if (!aic->jiffs || time_before(now, aic->jiffs) ||
|
||||
pbe_eq->cq_count < aic->eq_prev) {
|
||||
aic->jiffs = now;
|
||||
aic->eq_prev = pbe_eq->cq_count;
|
||||
continue;
|
||||
}
|
||||
delta = jiffies_to_msecs(now - aic->jiffs);
|
||||
pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta);
|
||||
eqd = (pps / 1500) << 2;
|
||||
|
||||
if (eqd < 8)
|
||||
eqd = 0;
|
||||
eqd = min_t(u32, eqd, phwi_context->max_eqd);
|
||||
eqd = max_t(u32, eqd, phwi_context->min_eqd);
|
||||
|
||||
aic->jiffs = now;
|
||||
aic->eq_prev = pbe_eq->cq_count;
|
||||
|
||||
if (eqd != aic->prev_eqd) {
|
||||
set_eqd[num].delay_multiplier = (eqd * 65)/100;
|
||||
set_eqd[num].eq_id = pbe_eq->q.id;
|
||||
aic->prev_eqd = eqd;
|
||||
num++;
|
||||
}
|
||||
}
|
||||
if (num) {
|
||||
tag = be_cmd_modify_eq_delay(phba, set_eqd, num);
|
||||
if (tag)
|
||||
beiscsi_mccq_compl(phba, tag, NULL, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* beiscsi_hw_health_check()- Check adapter health
|
||||
* @work: work item to check HW health
|
||||
@ -5303,6 +5348,8 @@ beiscsi_hw_health_check(struct work_struct *work)
|
||||
container_of(work, struct beiscsi_hba,
|
||||
beiscsi_hw_check_task.work);
|
||||
|
||||
be_eqd_update(phba);
|
||||
|
||||
beiscsi_ue_detect(phba);
|
||||
|
||||
schedule_delayed_work(&phba->beiscsi_hw_check_task,
|
||||
@ -5579,7 +5626,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
|
||||
phba->ctrl.mcc_numtag[i + 1] = 0;
|
||||
phba->ctrl.mcc_tag_available++;
|
||||
memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
|
||||
sizeof(struct beiscsi_mcc_tag_state));
|
||||
sizeof(struct be_dma_mem));
|
||||
}
|
||||
|
||||
phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
|
||||
@ -5621,6 +5668,9 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
|
||||
}
|
||||
hwi_enable_intr(phba);
|
||||
|
||||
if (iscsi_host_add(phba->shost, &phba->pcidev->dev))
|
||||
goto free_blkenbld;
|
||||
|
||||
if (beiscsi_setup_boot_info(phba))
|
||||
/*
|
||||
* log error but continue, because we may not be using
|
||||
|
@ -36,7 +36,7 @@
|
||||
#include <scsi/scsi_transport_iscsi.h>
|
||||
|
||||
#define DRV_NAME "be2iscsi"
|
||||
#define BUILD_STR "10.2.125.0"
|
||||
#define BUILD_STR "10.2.273.0"
|
||||
#define BE_NAME "Emulex OneConnect" \
|
||||
"Open-iSCSI Driver version" BUILD_STR
|
||||
#define DRV_DESC BE_NAME " " "Driver"
|
||||
@ -71,8 +71,8 @@
|
||||
|
||||
#define BEISCSI_SGLIST_ELEMENTS 30
|
||||
|
||||
#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
|
||||
#define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */
|
||||
#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
|
||||
#define BEISCSI_MAX_SECTORS 1024 /* scsi_host->max_sectors */
|
||||
#define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */
|
||||
|
||||
#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */
|
||||
@ -427,6 +427,7 @@ struct beiscsi_hba {
|
||||
struct mgmt_session_info boot_sess;
|
||||
struct invalidate_command_table inv_tbl[128];
|
||||
|
||||
struct be_aic_obj aic_obj[MAX_CPUS];
|
||||
unsigned int attr_log_enable;
|
||||
int (*iotask_fn)(struct iscsi_task *,
|
||||
struct scatterlist *sg,
|
||||
|
@ -155,6 +155,43 @@ void beiscsi_ue_detect(struct beiscsi_hba *phba)
|
||||
}
|
||||
}
|
||||
|
||||
int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
|
||||
struct be_set_eqd *set_eqd, int num)
|
||||
{
|
||||
struct be_ctrl_info *ctrl = &phba->ctrl;
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_req_modify_eq_delay *req;
|
||||
unsigned int tag = 0;
|
||||
int i;
|
||||
|
||||
spin_lock(&ctrl->mbox_lock);
|
||||
tag = alloc_mcc_tag(phba);
|
||||
if (!tag) {
|
||||
spin_unlock(&ctrl->mbox_lock);
|
||||
return tag;
|
||||
}
|
||||
|
||||
wrb = wrb_from_mccq(phba);
|
||||
req = embedded_payload(wrb);
|
||||
|
||||
wrb->tag0 |= tag;
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
|
||||
OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
|
||||
|
||||
req->num_eq = cpu_to_le32(num);
|
||||
for (i = 0; i < num; i++) {
|
||||
req->delay[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
|
||||
req->delay[i].phase = 0;
|
||||
req->delay[i].delay_multiplier =
|
||||
cpu_to_le32(set_eqd[i].delay_multiplier);
|
||||
}
|
||||
|
||||
be_mcc_notify(phba);
|
||||
spin_unlock(&ctrl->mbox_lock);
|
||||
return tag;
|
||||
}
|
||||
|
||||
/**
|
||||
* mgmt_reopen_session()- Reopen a session based on reopen_type
|
||||
* @phba: Device priv structure instance
|
||||
@ -447,8 +484,8 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
|
||||
struct be_dma_mem *nonemb_cmd)
|
||||
{
|
||||
struct be_cmd_resp_hdr *resp;
|
||||
struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
|
||||
struct be_sge *mcc_sge = nonembedded_sgl(wrb);
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_sge *mcc_sge;
|
||||
unsigned int tag = 0;
|
||||
struct iscsi_bsg_request *bsg_req = job->request;
|
||||
struct be_bsg_vendor_cmd *req = nonemb_cmd->va;
|
||||
@ -465,7 +502,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
|
||||
req->sector = sector;
|
||||
req->offset = offset;
|
||||
spin_lock(&ctrl->mbox_lock);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
|
||||
switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
|
||||
case BEISCSI_WRITE_FLASH:
|
||||
@ -495,6 +531,8 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
|
||||
return tag;
|
||||
}
|
||||
|
||||
wrb = wrb_from_mccq(phba);
|
||||
mcc_sge = nonembedded_sgl(wrb);
|
||||
be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false,
|
||||
job->request_payload.sg_cnt);
|
||||
mcc_sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
|
||||
@ -525,7 +563,6 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num)
|
||||
int status = 0;
|
||||
|
||||
spin_lock(&ctrl->mbox_lock);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
|
||||
@ -675,7 +712,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
|
||||
struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr;
|
||||
struct be_ctrl_info *ctrl = &phba->ctrl;
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct tcp_connect_and_offload_in *req;
|
||||
struct tcp_connect_and_offload_in_v1 *req;
|
||||
unsigned short def_hdr_id;
|
||||
unsigned short def_data_id;
|
||||
struct phys_addr template_address = { 0, 0 };
|
||||
@ -702,17 +739,16 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
|
||||
return tag;
|
||||
}
|
||||
wrb = wrb_from_mccq(phba);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
sge = nonembedded_sgl(wrb);
|
||||
|
||||
req = nonemb_cmd->va;
|
||||
memset(req, 0, sizeof(*req));
|
||||
wrb->tag0 |= tag;
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
|
||||
be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1);
|
||||
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
|
||||
OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD,
|
||||
sizeof(*req));
|
||||
nonemb_cmd->size);
|
||||
if (dst_addr->sa_family == PF_INET) {
|
||||
__be32 s_addr = daddr_in->sin_addr.s_addr;
|
||||
req->ip_address.ip_type = BE2_IPV4;
|
||||
@ -758,6 +794,13 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
|
||||
sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
|
||||
sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
|
||||
sge->len = cpu_to_le32(nonemb_cmd->size);
|
||||
|
||||
if (!is_chip_be2_be3r(phba)) {
|
||||
req->hdr.version = MBX_CMD_VER1;
|
||||
req->tcp_window_size = 0;
|
||||
req->tcp_window_scale_count = 2;
|
||||
}
|
||||
|
||||
be_mcc_notify(phba);
|
||||
spin_unlock(&ctrl->mbox_lock);
|
||||
return tag;
|
||||
@ -804,7 +847,7 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
|
||||
int resp_buf_len)
|
||||
{
|
||||
struct be_ctrl_info *ctrl = &phba->ctrl;
|
||||
struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_sge *sge;
|
||||
unsigned int tag;
|
||||
int rc = 0;
|
||||
@ -816,7 +859,8 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
|
||||
rc = -ENOMEM;
|
||||
goto free_cmd;
|
||||
}
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
|
||||
wrb = wrb_from_mccq(phba);
|
||||
wrb->tag0 |= tag;
|
||||
sge = nonembedded_sgl(wrb);
|
||||
|
||||
|
@ -335,5 +335,7 @@ void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
|
||||
void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
|
||||
struct wrb_handle *pwrb_handle);
|
||||
void beiscsi_ue_detect(struct beiscsi_hba *phba);
|
||||
int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
|
||||
struct be_set_eqd *, int num);
|
||||
|
||||
#endif
|
||||
|
@ -507,7 +507,7 @@ bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
|
||||
struct bfad_vport_s *vport;
|
||||
int rc;
|
||||
|
||||
vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
|
||||
vport = kzalloc(sizeof(struct bfad_vport_s), GFP_ATOMIC);
|
||||
if (!vport) {
|
||||
bfa_trc(bfad, 0);
|
||||
return;
|
||||
|
@ -1966,26 +1966,29 @@ static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
|
||||
{
|
||||
int i;
|
||||
int segment_count;
|
||||
int hash_table_size;
|
||||
u32 *pbl;
|
||||
|
||||
segment_count = hba->hash_tbl_segment_count;
|
||||
hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
|
||||
sizeof(struct fcoe_hash_table_entry);
|
||||
if (hba->hash_tbl_segments) {
|
||||
|
||||
pbl = hba->hash_tbl_pbl;
|
||||
for (i = 0; i < segment_count; ++i) {
|
||||
dma_addr_t dma_address;
|
||||
pbl = hba->hash_tbl_pbl;
|
||||
if (pbl) {
|
||||
segment_count = hba->hash_tbl_segment_count;
|
||||
for (i = 0; i < segment_count; ++i) {
|
||||
dma_addr_t dma_address;
|
||||
|
||||
dma_address = le32_to_cpu(*pbl);
|
||||
++pbl;
|
||||
dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
|
||||
++pbl;
|
||||
dma_free_coherent(&hba->pcidev->dev,
|
||||
BNX2FC_HASH_TBL_CHUNK_SIZE,
|
||||
hba->hash_tbl_segments[i],
|
||||
dma_address);
|
||||
dma_address = le32_to_cpu(*pbl);
|
||||
++pbl;
|
||||
dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
|
||||
++pbl;
|
||||
dma_free_coherent(&hba->pcidev->dev,
|
||||
BNX2FC_HASH_TBL_CHUNK_SIZE,
|
||||
hba->hash_tbl_segments[i],
|
||||
dma_address);
|
||||
}
|
||||
}
|
||||
|
||||
kfree(hba->hash_tbl_segments);
|
||||
hba->hash_tbl_segments = NULL;
|
||||
}
|
||||
|
||||
if (hba->hash_tbl_pbl) {
|
||||
@ -2023,7 +2026,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
|
||||
dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
|
||||
if (!dma_segment_array) {
|
||||
printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
|
||||
return -ENOMEM;
|
||||
goto cleanup_ht;
|
||||
}
|
||||
|
||||
for (i = 0; i < segment_count; ++i) {
|
||||
@ -2034,15 +2037,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
|
||||
GFP_KERNEL);
|
||||
if (!hba->hash_tbl_segments[i]) {
|
||||
printk(KERN_ERR PFX "hash segment alloc failed\n");
|
||||
while (--i >= 0) {
|
||||
dma_free_coherent(&hba->pcidev->dev,
|
||||
BNX2FC_HASH_TBL_CHUNK_SIZE,
|
||||
hba->hash_tbl_segments[i],
|
||||
dma_segment_array[i]);
|
||||
hba->hash_tbl_segments[i] = NULL;
|
||||
}
|
||||
kfree(dma_segment_array);
|
||||
return -ENOMEM;
|
||||
goto cleanup_dma;
|
||||
}
|
||||
memset(hba->hash_tbl_segments[i], 0,
|
||||
BNX2FC_HASH_TBL_CHUNK_SIZE);
|
||||
@ -2054,8 +2049,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
|
||||
GFP_KERNEL);
|
||||
if (!hba->hash_tbl_pbl) {
|
||||
printk(KERN_ERR PFX "hash table pbl alloc failed\n");
|
||||
kfree(dma_segment_array);
|
||||
return -ENOMEM;
|
||||
goto cleanup_dma;
|
||||
}
|
||||
memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
|
||||
|
||||
@ -2080,6 +2074,22 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
|
||||
}
|
||||
kfree(dma_segment_array);
|
||||
return 0;
|
||||
|
||||
cleanup_dma:
|
||||
for (i = 0; i < segment_count; ++i) {
|
||||
if (hba->hash_tbl_segments[i])
|
||||
dma_free_coherent(&hba->pcidev->dev,
|
||||
BNX2FC_HASH_TBL_CHUNK_SIZE,
|
||||
hba->hash_tbl_segments[i],
|
||||
dma_segment_array[i]);
|
||||
}
|
||||
|
||||
kfree(dma_segment_array);
|
||||
|
||||
cleanup_ht:
|
||||
kfree(hba->hash_tbl_segments);
|
||||
hba->hash_tbl_segments = NULL;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -3,8 +3,6 @@
|
||||
#define PSEUDO_DMA
|
||||
#define DONT_USE_INTR
|
||||
#define UNSAFE /* Leave interrupts enabled during pseudo-dma I/O */
|
||||
#define xNDEBUG (NDEBUG_INTR+NDEBUG_RESELECTION+\
|
||||
NDEBUG_SELECTION+NDEBUG_ARBITRATION)
|
||||
#define DMA_WORKS_RIGHT
|
||||
|
||||
|
||||
|
@ -390,7 +390,7 @@ static int esas2r_probe(struct pci_dev *pcid,
|
||||
esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
|
||||
"pci_enable_device() OK");
|
||||
esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
|
||||
"after pci_device_enable() enable_cnt: %d",
|
||||
"after pci_enable_device() enable_cnt: %d",
|
||||
pcid->enable_cnt.counter);
|
||||
|
||||
host = scsi_host_alloc(&driver_template, host_alloc_size);
|
||||
|
@ -39,14 +39,15 @@
|
||||
|
||||
#define DRV_NAME "fnic"
|
||||
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
|
||||
#define DRV_VERSION "1.5.0.45"
|
||||
#define DRV_VERSION "1.6.0.10"
|
||||
#define PFX DRV_NAME ": "
|
||||
#define DFX DRV_NAME "%d: "
|
||||
|
||||
#define DESC_CLEAN_LOW_WATERMARK 8
|
||||
#define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */
|
||||
#define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */
|
||||
#define FNIC_MAX_IO_REQ 2048 /* scsi_cmnd tag map entries */
|
||||
#define FNIC_MAX_IO_REQ 1024 /* scsi_cmnd tag map entries */
|
||||
#define FNIC_DFLT_IO_REQ 256 /* Default scsi_cmnd tag map entries */
|
||||
#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */
|
||||
#define FNIC_DFLT_QUEUE_DEPTH 32
|
||||
#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
|
||||
|
@ -25,6 +25,21 @@ static struct dentry *fnic_trace_debugfs_file;
|
||||
static struct dentry *fnic_trace_enable;
|
||||
static struct dentry *fnic_stats_debugfs_root;
|
||||
|
||||
static struct dentry *fnic_fc_trace_debugfs_file;
|
||||
static struct dentry *fnic_fc_rdata_trace_debugfs_file;
|
||||
static struct dentry *fnic_fc_trace_enable;
|
||||
static struct dentry *fnic_fc_trace_clear;
|
||||
|
||||
struct fc_trace_flag_type {
|
||||
u8 fc_row_file;
|
||||
u8 fc_normal_file;
|
||||
u8 fnic_trace;
|
||||
u8 fc_trace;
|
||||
u8 fc_clear;
|
||||
};
|
||||
|
||||
static struct fc_trace_flag_type *fc_trc_flag;
|
||||
|
||||
/*
|
||||
* fnic_debugfs_init - Initialize debugfs for fnic debug logging
|
||||
*
|
||||
@ -56,6 +71,18 @@ int fnic_debugfs_init(void)
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* Allocate memory to structure */
|
||||
fc_trc_flag = (struct fc_trace_flag_type *)
|
||||
vmalloc(sizeof(struct fc_trace_flag_type));
|
||||
|
||||
if (fc_trc_flag) {
|
||||
fc_trc_flag->fc_row_file = 0;
|
||||
fc_trc_flag->fc_normal_file = 1;
|
||||
fc_trc_flag->fnic_trace = 2;
|
||||
fc_trc_flag->fc_trace = 3;
|
||||
fc_trc_flag->fc_clear = 4;
|
||||
}
|
||||
|
||||
rc = 0;
|
||||
return rc;
|
||||
}
|
||||
@ -74,15 +101,19 @@ void fnic_debugfs_terminate(void)
|
||||
|
||||
debugfs_remove(fnic_trace_debugfs_root);
|
||||
fnic_trace_debugfs_root = NULL;
|
||||
|
||||
if (fc_trc_flag)
|
||||
vfree(fc_trc_flag);
|
||||
}
|
||||
|
||||
/*
|
||||
* fnic_trace_ctrl_open - Open the trace_enable file
|
||||
* fnic_trace_ctrl_open - Open the trace_enable file for fnic_trace
|
||||
* Or Open fc_trace_enable file for fc_trace
|
||||
* @inode: The inode pointer.
|
||||
* @file: The file pointer to attach the trace enable/disable flag.
|
||||
*
|
||||
* Description:
|
||||
* This routine opens a debugsfs file trace_enable.
|
||||
* This routine opens a debugsfs file trace_enable or fc_trace_enable.
|
||||
*
|
||||
* Returns:
|
||||
* This function returns zero if successful.
|
||||
@ -94,15 +125,19 @@ static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp)
|
||||
}
|
||||
|
||||
/*
|
||||
* fnic_trace_ctrl_read - Read a trace_enable debugfs file
|
||||
* fnic_trace_ctrl_read -
|
||||
* Read trace_enable ,fc_trace_enable
|
||||
* or fc_trace_clear debugfs file
|
||||
* @filp: The file pointer to read from.
|
||||
* @ubuf: The buffer to copy the data to.
|
||||
* @cnt: The number of bytes to read.
|
||||
* @ppos: The position in the file to start reading from.
|
||||
*
|
||||
* Description:
|
||||
* This routine reads value of variable fnic_tracing_enabled
|
||||
* and stores into local @buf. It will start reading file at @ppos and
|
||||
* This routine reads value of variable fnic_tracing_enabled or
|
||||
* fnic_fc_tracing_enabled or fnic_fc_trace_cleared
|
||||
* and stores into local @buf.
|
||||
* It will start reading file at @ppos and
|
||||
* copy up to @cnt of data to @ubuf from @buf.
|
||||
*
|
||||
* Returns:
|
||||
@ -114,13 +149,25 @@ static ssize_t fnic_trace_ctrl_read(struct file *filp,
|
||||
{
|
||||
char buf[64];
|
||||
int len;
|
||||
len = sprintf(buf, "%u\n", fnic_tracing_enabled);
|
||||
u8 *trace_type;
|
||||
len = 0;
|
||||
trace_type = (u8 *)filp->private_data;
|
||||
if (*trace_type == fc_trc_flag->fnic_trace)
|
||||
len = sprintf(buf, "%u\n", fnic_tracing_enabled);
|
||||
else if (*trace_type == fc_trc_flag->fc_trace)
|
||||
len = sprintf(buf, "%u\n", fnic_fc_tracing_enabled);
|
||||
else if (*trace_type == fc_trc_flag->fc_clear)
|
||||
len = sprintf(buf, "%u\n", fnic_fc_trace_cleared);
|
||||
else
|
||||
pr_err("fnic: Cannot read to any debugfs file\n");
|
||||
|
||||
return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
|
||||
}
|
||||
|
||||
/*
|
||||
* fnic_trace_ctrl_write - Write to trace_enable debugfs file
|
||||
* fnic_trace_ctrl_write -
|
||||
* Write to trace_enable, fc_trace_enable or
|
||||
* fc_trace_clear debugfs file
|
||||
* @filp: The file pointer to write from.
|
||||
* @ubuf: The buffer to copy the data from.
|
||||
* @cnt: The number of bytes to write.
|
||||
@ -128,7 +175,8 @@ static ssize_t fnic_trace_ctrl_read(struct file *filp,
|
||||
*
|
||||
* Description:
|
||||
* This routine writes data from user buffer @ubuf to buffer @buf and
|
||||
* sets fnic_tracing_enabled value as per user input.
|
||||
* sets fc_trace_enable ,tracing_enable or fnic_fc_trace_cleared
|
||||
* value as per user input.
|
||||
*
|
||||
* Returns:
|
||||
* This function returns the amount of data that was written.
|
||||
@ -140,6 +188,8 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp,
|
||||
char buf[64];
|
||||
unsigned long val;
|
||||
int ret;
|
||||
u8 *trace_type;
|
||||
trace_type = (u8 *)filp->private_data;
|
||||
|
||||
if (cnt >= sizeof(buf))
|
||||
return -EINVAL;
|
||||
@ -153,12 +203,27 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp,
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
fnic_tracing_enabled = val;
|
||||
if (*trace_type == fc_trc_flag->fnic_trace)
|
||||
fnic_tracing_enabled = val;
|
||||
else if (*trace_type == fc_trc_flag->fc_trace)
|
||||
fnic_fc_tracing_enabled = val;
|
||||
else if (*trace_type == fc_trc_flag->fc_clear)
|
||||
fnic_fc_trace_cleared = val;
|
||||
else
|
||||
pr_err("fnic: cannot write to any debufs file\n");
|
||||
|
||||
(*ppos)++;
|
||||
|
||||
return cnt;
|
||||
}
|
||||
|
||||
static const struct file_operations fnic_trace_ctrl_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = fnic_trace_ctrl_open,
|
||||
.read = fnic_trace_ctrl_read,
|
||||
.write = fnic_trace_ctrl_write,
|
||||
};
|
||||
|
||||
/*
|
||||
* fnic_trace_debugfs_open - Open the fnic trace log
|
||||
* @inode: The inode pointer
|
||||
@ -178,19 +243,36 @@ static int fnic_trace_debugfs_open(struct inode *inode,
|
||||
struct file *file)
|
||||
{
|
||||
fnic_dbgfs_t *fnic_dbg_prt;
|
||||
u8 *rdata_ptr;
|
||||
rdata_ptr = (u8 *)inode->i_private;
|
||||
fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL);
|
||||
if (!fnic_dbg_prt)
|
||||
return -ENOMEM;
|
||||
|
||||
fnic_dbg_prt->buffer = vmalloc((3*(trace_max_pages * PAGE_SIZE)));
|
||||
if (!fnic_dbg_prt->buffer) {
|
||||
kfree(fnic_dbg_prt);
|
||||
return -ENOMEM;
|
||||
if (*rdata_ptr == fc_trc_flag->fnic_trace) {
|
||||
fnic_dbg_prt->buffer = vmalloc(3 *
|
||||
(trace_max_pages * PAGE_SIZE));
|
||||
if (!fnic_dbg_prt->buffer) {
|
||||
kfree(fnic_dbg_prt);
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset((void *)fnic_dbg_prt->buffer, 0,
|
||||
3 * (trace_max_pages * PAGE_SIZE));
|
||||
fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt);
|
||||
} else {
|
||||
fnic_dbg_prt->buffer =
|
||||
vmalloc(3 * (fnic_fc_trace_max_pages * PAGE_SIZE));
|
||||
if (!fnic_dbg_prt->buffer) {
|
||||
kfree(fnic_dbg_prt);
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset((void *)fnic_dbg_prt->buffer, 0,
|
||||
3 * (fnic_fc_trace_max_pages * PAGE_SIZE));
|
||||
fnic_dbg_prt->buffer_len =
|
||||
fnic_fc_trace_get_data(fnic_dbg_prt, *rdata_ptr);
|
||||
}
|
||||
memset((void *)fnic_dbg_prt->buffer, 0,
|
||||
(3*(trace_max_pages * PAGE_SIZE)));
|
||||
fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt);
|
||||
file->private_data = fnic_dbg_prt;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -272,13 +354,6 @@ static int fnic_trace_debugfs_release(struct inode *inode,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct file_operations fnic_trace_ctrl_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = fnic_trace_ctrl_open,
|
||||
.read = fnic_trace_ctrl_read,
|
||||
.write = fnic_trace_ctrl_write,
|
||||
};
|
||||
|
||||
static const struct file_operations fnic_trace_debugfs_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = fnic_trace_debugfs_open,
|
||||
@ -306,9 +381,10 @@ int fnic_trace_debugfs_init(void)
|
||||
return rc;
|
||||
}
|
||||
fnic_trace_enable = debugfs_create_file("tracing_enable",
|
||||
S_IFREG|S_IRUGO|S_IWUSR,
|
||||
fnic_trace_debugfs_root,
|
||||
NULL, &fnic_trace_ctrl_fops);
|
||||
S_IFREG|S_IRUGO|S_IWUSR,
|
||||
fnic_trace_debugfs_root,
|
||||
&(fc_trc_flag->fnic_trace),
|
||||
&fnic_trace_ctrl_fops);
|
||||
|
||||
if (!fnic_trace_enable) {
|
||||
printk(KERN_DEBUG
|
||||
@ -317,10 +393,10 @@ int fnic_trace_debugfs_init(void)
|
||||
}
|
||||
|
||||
fnic_trace_debugfs_file = debugfs_create_file("trace",
|
||||
S_IFREG|S_IRUGO|S_IWUSR,
|
||||
fnic_trace_debugfs_root,
|
||||
NULL,
|
||||
&fnic_trace_debugfs_fops);
|
||||
S_IFREG|S_IRUGO|S_IWUSR,
|
||||
fnic_trace_debugfs_root,
|
||||
&(fc_trc_flag->fnic_trace),
|
||||
&fnic_trace_debugfs_fops);
|
||||
|
||||
if (!fnic_trace_debugfs_file) {
|
||||
printk(KERN_DEBUG
|
||||
@ -340,14 +416,104 @@ int fnic_trace_debugfs_init(void)
|
||||
*/
|
||||
void fnic_trace_debugfs_terminate(void)
|
||||
{
|
||||
if (fnic_trace_debugfs_file) {
|
||||
debugfs_remove(fnic_trace_debugfs_file);
|
||||
fnic_trace_debugfs_file = NULL;
|
||||
debugfs_remove(fnic_trace_debugfs_file);
|
||||
fnic_trace_debugfs_file = NULL;
|
||||
|
||||
debugfs_remove(fnic_trace_enable);
|
||||
fnic_trace_enable = NULL;
|
||||
}
|
||||
|
||||
/*
 * fnic_fc_trace_debugfs_init -
 * Initialize debugfs for fnic control frame trace logging
 *
 * Description:
 * When debugfs is configured, this routine sets up the fnic_fc debugfs
 * file system. If not already created, it creates the trace files used to
 * log the fnic fc trace buffer output into debugfs, and it also creates the
 * fc_trace_enable file to control enabling/disabling of trace logging into
 * the trace buffer.
 */

int fnic_fc_trace_debugfs_init(void)
|
||||
{
|
||||
int rc = -1;
|
||||
|
||||
if (!fnic_trace_debugfs_root) {
|
||||
pr_err("fnic:Debugfs root directory doesn't exist\n");
|
||||
return rc;
|
||||
}
|
||||
if (fnic_trace_enable) {
|
||||
debugfs_remove(fnic_trace_enable);
|
||||
fnic_trace_enable = NULL;
|
||||
|
||||
fnic_fc_trace_enable = debugfs_create_file("fc_trace_enable",
|
||||
S_IFREG|S_IRUGO|S_IWUSR,
|
||||
fnic_trace_debugfs_root,
|
||||
&(fc_trc_flag->fc_trace),
|
||||
&fnic_trace_ctrl_fops);
|
||||
|
||||
if (!fnic_fc_trace_enable) {
|
||||
pr_err("fnic: Failed create fc_trace_enable file\n");
|
||||
return rc;
|
||||
}
|
||||
|
||||
fnic_fc_trace_clear = debugfs_create_file("fc_trace_clear",
|
||||
S_IFREG|S_IRUGO|S_IWUSR,
|
||||
fnic_trace_debugfs_root,
|
||||
&(fc_trc_flag->fc_clear),
|
||||
&fnic_trace_ctrl_fops);
|
||||
|
||||
if (!fnic_fc_trace_clear) {
|
||||
pr_err("fnic: Failed to create fc_trace_enable file\n");
|
||||
return rc;
|
||||
}
|
||||
|
||||
fnic_fc_rdata_trace_debugfs_file =
|
||||
debugfs_create_file("fc_trace_rdata",
|
||||
S_IFREG|S_IRUGO|S_IWUSR,
|
||||
fnic_trace_debugfs_root,
|
||||
&(fc_trc_flag->fc_normal_file),
|
||||
&fnic_trace_debugfs_fops);
|
||||
|
||||
if (!fnic_fc_rdata_trace_debugfs_file) {
|
||||
pr_err("fnic: Failed create fc_rdata_trace file\n");
|
||||
return rc;
|
||||
}
|
||||
|
||||
fnic_fc_trace_debugfs_file =
|
||||
debugfs_create_file("fc_trace",
|
||||
S_IFREG|S_IRUGO|S_IWUSR,
|
||||
fnic_trace_debugfs_root,
|
||||
&(fc_trc_flag->fc_row_file),
|
||||
&fnic_trace_debugfs_fops);
|
||||
|
||||
if (!fnic_fc_trace_debugfs_file) {
|
||||
pr_err("fnic: Failed to create fc_trace file\n");
|
||||
return rc;
|
||||
}
|
||||
rc = 0;
|
||||
return rc;
|
||||
}
|
||||
|
||||
/*
|
||||
* fnic_fc_trace_debugfs_terminate - Tear down debugfs infrastructure
|
||||
*
|
||||
* Description:
|
||||
* When Debugfs is configured this routine removes debugfs file system
|
||||
* elements that are specific to fnic_fc trace logging.
|
||||
*/
|
||||
|
||||
void fnic_fc_trace_debugfs_terminate(void)
|
||||
{
|
||||
debugfs_remove(fnic_fc_trace_debugfs_file);
|
||||
fnic_fc_trace_debugfs_file = NULL;
|
||||
|
||||
debugfs_remove(fnic_fc_rdata_trace_debugfs_file);
|
||||
fnic_fc_rdata_trace_debugfs_file = NULL;
|
||||
|
||||
debugfs_remove(fnic_fc_trace_enable);
|
||||
fnic_fc_trace_enable = NULL;
|
||||
|
||||
debugfs_remove(fnic_fc_trace_clear);
|
||||
fnic_fc_trace_clear = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -66,19 +66,35 @@ void fnic_handle_link(struct work_struct *work)
|
||||
fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
|
||||
|
||||
if (old_link_status == fnic->link_status) {
|
||||
if (!fnic->link_status)
|
||||
if (!fnic->link_status) {
|
||||
/* DOWN -> DOWN */
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
|
||||
else {
|
||||
fnic_fc_trace_set_data(fnic->lport->host->host_no,
|
||||
FNIC_FC_LE, "Link Status: DOWN->DOWN",
|
||||
strlen("Link Status: DOWN->DOWN"));
|
||||
} else {
|
||||
if (old_link_down_cnt != fnic->link_down_cnt) {
|
||||
/* UP -> DOWN -> UP */
|
||||
fnic->lport->host_stats.link_failure_count++;
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
|
||||
fnic_fc_trace_set_data(
|
||||
fnic->lport->host->host_no,
|
||||
FNIC_FC_LE,
|
||||
"Link Status:UP_DOWN_UP",
|
||||
strlen("Link_Status:UP_DOWN_UP")
|
||||
);
|
||||
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
|
||||
"link down\n");
|
||||
fcoe_ctlr_link_down(&fnic->ctlr);
|
||||
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
|
||||
/* start FCoE VLAN discovery */
|
||||
fnic_fc_trace_set_data(
|
||||
fnic->lport->host->host_no,
|
||||
FNIC_FC_LE,
|
||||
"Link Status: UP_DOWN_UP_VLAN",
|
||||
strlen(
|
||||
"Link Status: UP_DOWN_UP_VLAN")
|
||||
);
|
||||
fnic_fcoe_send_vlan_req(fnic);
|
||||
return;
|
||||
}
|
||||
@ -88,22 +104,36 @@ void fnic_handle_link(struct work_struct *work)
|
||||
} else
|
||||
/* UP -> UP */
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
|
||||
fnic_fc_trace_set_data(
|
||||
fnic->lport->host->host_no, FNIC_FC_LE,
|
||||
"Link Status: UP_UP",
|
||||
strlen("Link Status: UP_UP"));
|
||||
}
|
||||
} else if (fnic->link_status) {
|
||||
/* DOWN -> UP */
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
|
||||
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
|
||||
/* start FCoE VLAN discovery */
|
||||
fnic_fc_trace_set_data(
|
||||
fnic->lport->host->host_no,
|
||||
FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
|
||||
strlen("Link Status: DOWN_UP_VLAN"));
|
||||
fnic_fcoe_send_vlan_req(fnic);
|
||||
return;
|
||||
}
|
||||
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
|
||||
fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
|
||||
"Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
|
||||
fcoe_ctlr_link_up(&fnic->ctlr);
|
||||
} else {
|
||||
/* UP -> DOWN */
|
||||
fnic->lport->host_stats.link_failure_count++;
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
|
||||
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
|
||||
fnic_fc_trace_set_data(
|
||||
fnic->lport->host->host_no, FNIC_FC_LE,
|
||||
"Link Status: UP_DOWN",
|
||||
strlen("Link Status: UP_DOWN"));
|
||||
fcoe_ctlr_link_down(&fnic->ctlr);
|
||||
}
|
||||
|
||||
@ -267,11 +297,6 @@ static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
|
||||
|
||||
if (desc->fip_dtype == FIP_DT_FLOGI) {
|
||||
|
||||
shost_printk(KERN_DEBUG, lport->host,
|
||||
" FIP TYPE FLOGI: fab name:%llx "
|
||||
"vfid:%d map:%x\n",
|
||||
fip->sel_fcf->fabric_name, fip->sel_fcf->vfid,
|
||||
fip->sel_fcf->fc_map);
|
||||
if (dlen < sizeof(*els) + sizeof(*fh) + 1)
|
||||
return 0;
|
||||
|
||||
@ -616,6 +641,10 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
|
||||
"using UCSM\n");
|
||||
goto drop;
|
||||
}
|
||||
if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
|
||||
FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
|
||||
printk(KERN_ERR "fnic ctlr frame trace error!!!");
|
||||
}
|
||||
skb_queue_tail(&fnic->fip_frame_queue, skb);
|
||||
queue_work(fnic_fip_queue, &fnic->fip_frame_work);
|
||||
return 1; /* let caller know packet was used */
|
||||
@ -844,6 +873,10 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
|
||||
}
|
||||
fr_dev(fp) = fnic->lport;
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
|
||||
if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
|
||||
(char *)skb->data, skb->len)) != 0) {
|
||||
printk(KERN_ERR "fnic ctlr frame trace error!!!");
|
||||
}
|
||||
|
||||
skb_queue_tail(&fnic->frame_queue, skb);
|
||||
queue_work(fnic_event_queue, &fnic->frame_work);
|
||||
@ -951,6 +984,15 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
|
||||
vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
|
||||
vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
|
||||
vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
|
||||
if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
|
||||
FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
|
||||
printk(KERN_ERR "fnic ctlr frame trace error!!!");
|
||||
}
|
||||
} else {
|
||||
if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
|
||||
FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
|
||||
printk(KERN_ERR "fnic ctlr frame trace error!!!");
|
||||
}
|
||||
}
|
||||
|
||||
pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
|
||||
@ -1023,6 +1065,11 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
|
||||
|
||||
pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
|
||||
|
||||
if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
|
||||
(char *)eth_hdr, tot_len)) != 0) {
|
||||
printk(KERN_ERR "fnic ctlr frame trace error!!!");
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&fnic->wq_lock[0], flags);
|
||||
|
||||
if (!vnic_wq_desc_avail(wq)) {
|
||||
|
@ -74,6 +74,11 @@ module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
|
||||
MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
|
||||
"for fnic trace buffer");
|
||||
|
||||
unsigned int fnic_fc_trace_max_pages = 64;
|
||||
module_param(fnic_fc_trace_max_pages, uint, S_IRUGO|S_IWUSR);
|
||||
MODULE_PARM_DESC(fnic_fc_trace_max_pages,
|
||||
"Total allocated memory pages for fc trace buffer");
|
||||
|
||||
static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH;
|
||||
module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR);
|
||||
MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN");
|
||||
@ -111,7 +116,7 @@ static struct scsi_host_template fnic_host_template = {
|
||||
.change_queue_type = fc_change_queue_type,
|
||||
.this_id = -1,
|
||||
.cmd_per_lun = 3,
|
||||
.can_queue = FNIC_MAX_IO_REQ,
|
||||
.can_queue = FNIC_DFLT_IO_REQ,
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.sg_tablesize = FNIC_MAX_SG_DESC_CNT,
|
||||
.max_sectors = 0xffff,
|
||||
@ -773,6 +778,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
shost_printk(KERN_INFO, fnic->lport->host,
|
||||
"firmware uses non-FIP mode\n");
|
||||
fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP);
|
||||
fnic->ctlr.state = FIP_ST_NON_FIP;
|
||||
}
|
||||
fnic->state = FNIC_IN_FC_MODE;
|
||||
|
||||
@ -1033,11 +1039,20 @@ static int __init fnic_init_module(void)
|
||||
/* Allocate memory for trace buffer */
|
||||
err = fnic_trace_buf_init();
|
||||
if (err < 0) {
|
||||
printk(KERN_ERR PFX "Trace buffer initialization Failed "
|
||||
"Fnic Tracing utility is disabled\n");
|
||||
printk(KERN_ERR PFX
|
||||
"Trace buffer initialization Failed. "
|
||||
"Fnic Tracing utility is disabled\n");
|
||||
fnic_trace_free();
|
||||
}
|
||||
|
||||
/* Allocate memory for fc trace buffer */
|
||||
err = fnic_fc_trace_init();
|
||||
if (err < 0) {
|
||||
printk(KERN_ERR PFX "FC trace buffer initialization Failed "
|
||||
"FC frame tracing utility is disabled\n");
|
||||
fnic_fc_trace_free();
|
||||
}
|
||||
|
||||
/* Create a cache for allocation of default size sgls */
|
||||
len = sizeof(struct fnic_dflt_sgl_list);
|
||||
fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
|
||||
@ -1118,6 +1133,7 @@ err_create_fnic_sgl_slab_max:
|
||||
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
|
||||
err_create_fnic_sgl_slab_dflt:
|
||||
fnic_trace_free();
|
||||
fnic_fc_trace_free();
|
||||
fnic_debugfs_terminate();
|
||||
return err;
|
||||
}
|
||||
@ -1135,6 +1151,7 @@ static void __exit fnic_cleanup_module(void)
|
||||
kmem_cache_destroy(fnic_io_req_cache);
|
||||
fc_release_transport(fnic_fc_transport);
|
||||
fnic_trace_free();
|
||||
fnic_fc_trace_free();
|
||||
fnic_debugfs_terminate();
|
||||
}
|
||||
|
||||
|
@ -1312,8 +1312,9 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
|
||||
|
||||
cleanup_scsi_cmd:
|
||||
sc->result = DID_TRANSPORT_DISRUPTED << 16;
|
||||
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:"
|
||||
" DID_TRANSPORT_DISRUPTED\n");
|
||||
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
|
||||
"%s: sc duration = %lu DID_TRANSPORT_DISRUPTED\n",
|
||||
__func__, (jiffies - start_time));
|
||||
|
||||
if (atomic64_read(&fnic->io_cmpl_skip))
|
||||
atomic64_dec(&fnic->io_cmpl_skip);
|
||||
@ -1733,6 +1734,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
|
||||
struct fnic_stats *fnic_stats;
|
||||
struct abort_stats *abts_stats;
|
||||
struct terminate_stats *term_stats;
|
||||
enum fnic_ioreq_state old_ioreq_state;
|
||||
int tag;
|
||||
DECLARE_COMPLETION_ONSTACK(tm_done);
|
||||
|
||||
@ -1793,6 +1795,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
|
||||
* the completion wont be done till mid-layer, since abort
|
||||
* has already started.
|
||||
*/
|
||||
old_ioreq_state = CMD_STATE(sc);
|
||||
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
|
||||
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
|
||||
|
||||
@ -1816,6 +1819,8 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
|
||||
if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
|
||||
fc_lun.scsi_lun, io_req)) {
|
||||
spin_lock_irqsave(io_lock, flags);
|
||||
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
|
||||
CMD_STATE(sc) = old_ioreq_state;
|
||||
io_req = (struct fnic_io_req *)CMD_SP(sc);
|
||||
if (io_req)
|
||||
io_req->abts_done = NULL;
|
||||
@ -1859,12 +1864,8 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
|
||||
if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
|
||||
spin_unlock_irqrestore(io_lock, flags);
|
||||
if (task_req == FCPIO_ITMF_ABT_TASK) {
|
||||
FNIC_SCSI_DBG(KERN_INFO,
|
||||
fnic->lport->host, "Abort Driver Timeout\n");
|
||||
atomic64_inc(&abts_stats->abort_drv_timeouts);
|
||||
} else {
|
||||
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
|
||||
"Terminate Driver Timeout\n");
|
||||
atomic64_inc(&term_stats->terminate_drv_timeouts);
|
||||
}
|
||||
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
|
||||
|
@ -20,6 +20,7 @@
|
||||
#include <linux/errno.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/kallsyms.h>
|
||||
#include <linux/time.h>
|
||||
#include "fnic_io.h"
|
||||
#include "fnic.h"
|
||||
|
||||
@ -32,6 +33,16 @@ static DEFINE_SPINLOCK(fnic_trace_lock);
|
||||
static fnic_trace_dbg_t fnic_trace_entries;
|
||||
int fnic_tracing_enabled = 1;
|
||||
|
||||
/* static char *fnic_fc_ctlr_trace_buf_p; */
|
||||
|
||||
static int fc_trace_max_entries;
|
||||
static unsigned long fnic_fc_ctlr_trace_buf_p;
|
||||
static fnic_trace_dbg_t fc_trace_entries;
|
||||
int fnic_fc_tracing_enabled = 1;
|
||||
int fnic_fc_trace_cleared = 1;
|
||||
static DEFINE_SPINLOCK(fnic_fc_trace_lock);
|
||||
|
||||
|
||||
/*
|
||||
* fnic_trace_get_buf - Give buffer pointer to user to fill up trace information
|
||||
*
|
||||
@ -428,10 +439,10 @@ int fnic_trace_buf_init(void)
|
||||
}
|
||||
err = fnic_trace_debugfs_init();
|
||||
if (err < 0) {
|
||||
printk(KERN_ERR PFX "Failed to initialize debugfs for tracing\n");
|
||||
pr_err("fnic: Failed to initialize debugfs for tracing\n");
|
||||
goto err_fnic_trace_debugfs_init;
|
||||
}
|
||||
printk(KERN_INFO PFX "Successfully Initialized Trace Buffer\n");
|
||||
pr_info("fnic: Successfully Initialized Trace Buffer\n");
|
||||
return err;
|
||||
err_fnic_trace_debugfs_init:
|
||||
fnic_trace_free();
|
||||
@ -456,3 +467,314 @@ void fnic_trace_free(void)
|
||||
}
|
||||
printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n");
|
||||
}
|
||||
|
||||
/*
 * fnic_fc_ctlr_trace_buf_init -
 * Initialize trace buffer to log fnic control frames
 * Description:
 * Initialize trace buffer data structure by allocating
 * required memory for trace data as well as for Indexes.
 * Frame size is 256 bytes and
 * memory is allocated for 1024 entries of 256 bytes.
 * Page_offset(Index) is set to the address of trace entry
 * and page_offset is initialized by adding frame size
 * to the previous page_offset entry.
 */
|
||||
int fnic_fc_trace_init(void)
|
||||
{
|
||||
unsigned long fc_trace_buf_head;
|
||||
int err = 0;
|
||||
int i;
|
||||
|
||||
fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/
|
||||
FC_TRC_SIZE_BYTES;
|
||||
fnic_fc_ctlr_trace_buf_p = (unsigned long)vmalloc(
|
||||
fnic_fc_trace_max_pages * PAGE_SIZE);
|
||||
if (!fnic_fc_ctlr_trace_buf_p) {
|
||||
pr_err("fnic: Failed to allocate memory for "
|
||||
"FC Control Trace Buf\n");
|
||||
err = -ENOMEM;
|
||||
goto err_fnic_fc_ctlr_trace_buf_init;
|
||||
}
|
||||
|
||||
memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
|
||||
fnic_fc_trace_max_pages * PAGE_SIZE);
|
||||
|
||||
/* Allocate memory for page offset */
|
||||
fc_trace_entries.page_offset = vmalloc(fc_trace_max_entries *
|
||||
sizeof(unsigned long));
|
||||
if (!fc_trace_entries.page_offset) {
|
||||
pr_err("fnic:Failed to allocate memory for page_offset\n");
|
||||
if (fnic_fc_ctlr_trace_buf_p) {
|
||||
pr_err("fnic: Freeing FC Control Trace Buf\n");
|
||||
vfree((void *)fnic_fc_ctlr_trace_buf_p);
|
||||
fnic_fc_ctlr_trace_buf_p = 0;
|
||||
}
|
||||
err = -ENOMEM;
|
||||
goto err_fnic_fc_ctlr_trace_buf_init;
|
||||
}
|
||||
memset((void *)fc_trace_entries.page_offset, 0,
|
||||
(fc_trace_max_entries * sizeof(unsigned long)));
|
||||
|
||||
fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
|
||||
fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p;
|
||||
|
||||
/*
|
||||
* Set up fc_trace_entries.page_offset field with memory location
|
||||
* for every trace entry
|
||||
*/
|
||||
for (i = 0; i < fc_trace_max_entries; i++) {
|
||||
fc_trace_entries.page_offset[i] = fc_trace_buf_head;
|
||||
fc_trace_buf_head += FC_TRC_SIZE_BYTES;
|
||||
}
|
||||
err = fnic_fc_trace_debugfs_init();
|
||||
if (err < 0) {
|
||||
pr_err("fnic: Failed to initialize FC_CTLR tracing.\n");
|
||||
goto err_fnic_fc_ctlr_trace_debugfs_init;
|
||||
}
|
||||
pr_info("fnic: Successfully Initialized FC_CTLR Trace Buffer\n");
|
||||
return err;
|
||||
|
||||
err_fnic_fc_ctlr_trace_debugfs_init:
|
||||
fnic_fc_trace_free();
|
||||
err_fnic_fc_ctlr_trace_buf_init:
|
||||
return err;
|
||||
}
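The loop above fills fc_trace_entries.page_offset with the start address of each fixed 256-byte slot. A user-space sketch of the same layout, under the assumption of 4 KiB pages and the default of 64 pages (so 64 * 4096 / 256 = 1024 entries); buffer and variable names here are illustrative, not the driver's:

#include <stdio.h>
#include <stdlib.h>

#define ENTRY_SIZE 256			/* mirrors FC_TRC_SIZE_BYTES */

int main(void)
{
	size_t pages = 64, page_size = 4096;
	size_t max_entries = pages * page_size / ENTRY_SIZE;
	unsigned char *buf = calloc(pages, page_size);
	unsigned long *page_offset = calloc(max_entries, sizeof(*page_offset));
	size_t i;

	if (!buf || !page_offset)
		return 1;

	/* every entry i starts ENTRY_SIZE bytes after entry i - 1 */
	for (i = 0; i < max_entries; i++)
		page_offset[i] = (unsigned long)buf + i * ENTRY_SIZE;

	printf("%zu entries, first at %#lx, last at %#lx\n",
	       max_entries, page_offset[0], page_offset[max_entries - 1]);
	free(page_offset);
	free(buf);
	return 0;
}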
|
||||
|
||||
/*
|
||||
* Fnic_fc_ctlr_trace_free - Free memory of fnic_fc_ctlr trace data structures.
|
||||
*/
|
||||
void fnic_fc_trace_free(void)
|
||||
{
|
||||
fnic_fc_tracing_enabled = 0;
|
||||
fnic_fc_trace_debugfs_terminate();
|
||||
if (fc_trace_entries.page_offset) {
|
||||
vfree((void *)fc_trace_entries.page_offset);
|
||||
fc_trace_entries.page_offset = NULL;
|
||||
}
|
||||
if (fnic_fc_ctlr_trace_buf_p) {
|
||||
vfree((void *)fnic_fc_ctlr_trace_buf_p);
|
||||
fnic_fc_ctlr_trace_buf_p = 0;
|
||||
}
|
||||
pr_info("fnic:Successfully FC_CTLR Freed Trace Buffer\n");
|
||||
}
|
||||
|
||||
/*
 * fnic_fc_ctlr_set_trace_data:
 * Maintain rd & wr idx accordingly and set data
 * Passed parameters:
 * host_no: host number associated with fnic
 * frame_type: send frame, receive frame or link event
 * fc_frame: pointer to fc_frame
 * frame_len: Length of the fc_frame
 * Description:
 * This routine will get the next available wr_idx and
 * copy all passed trace data to the buffer pointed to by wr_idx
 * and increment wr_idx. It will also make sure that we don't
 * overwrite the entry which we are reading and also
 * wrap around if we reach the maximum entries.
 * Returned Value:
 * It will return 0 for success or -1 for failure
 */
int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
|
||||
char *frame, u32 fc_trc_frame_len)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct fc_trace_hdr *fc_buf;
|
||||
unsigned long eth_fcoe_hdr_len;
|
||||
char *fc_trace;
|
||||
|
||||
if (fnic_fc_tracing_enabled == 0)
|
||||
return 0;
|
||||
|
||||
spin_lock_irqsave(&fnic_fc_trace_lock, flags);
|
||||
|
||||
if (fnic_fc_trace_cleared == 1) {
|
||||
fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
|
||||
pr_info("fnic: Reseting the read idx\n");
|
||||
memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
|
||||
fnic_fc_trace_max_pages * PAGE_SIZE);
|
||||
fnic_fc_trace_cleared = 0;
|
||||
}
|
||||
|
||||
fc_buf = (struct fc_trace_hdr *)
|
||||
fc_trace_entries.page_offset[fc_trace_entries.wr_idx];
|
||||
|
||||
fc_trace_entries.wr_idx++;
|
||||
|
||||
if (fc_trace_entries.wr_idx >= fc_trace_max_entries)
|
||||
fc_trace_entries.wr_idx = 0;
|
||||
|
||||
if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) {
|
||||
fc_trace_entries.rd_idx++;
|
||||
if (fc_trace_entries.rd_idx >= fc_trace_max_entries)
|
||||
fc_trace_entries.rd_idx = 0;
|
||||
}
|
||||
|
||||
fc_buf->time_stamp = CURRENT_TIME;
|
||||
fc_buf->host_no = host_no;
|
||||
fc_buf->frame_type = frame_type;
|
||||
|
||||
fc_trace = (char *)FC_TRACE_ADDRESS(fc_buf);
|
||||
|
||||
/* During the receive path, we do not have eth hdr as well as fcoe hdr
|
||||
* at trace entry point so we will stuff 0xff just to make it generic.
|
||||
*/
|
||||
if (frame_type == FNIC_FC_RECV) {
|
||||
eth_fcoe_hdr_len = sizeof(struct ethhdr) +
|
||||
sizeof(struct fcoe_hdr);
|
||||
fc_trc_frame_len = fc_trc_frame_len + eth_fcoe_hdr_len;
|
||||
memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len);
|
||||
/* Copy the rest of data frame */
|
||||
memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame,
|
||||
min_t(u8, fc_trc_frame_len,
|
||||
(u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)));
|
||||
} else {
|
||||
memcpy((char *)fc_trace, (void *)frame,
|
||||
min_t(u8, fc_trc_frame_len,
|
||||
(u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)));
|
||||
}
|
||||
|
||||
/* Store the actual received length */
|
||||
fc_buf->frame_len = fc_trc_frame_len;
|
||||
|
||||
spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
|
||||
return 0;
|
||||
}
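The wr_idx/rd_idx handling above implements an overwrite-oldest ring: when the writer catches up with the reader, the read index is pushed forward so the oldest entry is sacrificed rather than blocking the writer. A minimal stand-alone model of that policy (illustrative only, not driver code):

#include <stdio.h>

#define MAX_ENTRIES 8

static int wr_idx, rd_idx;

/* advance the write slot, evicting the oldest entry if the ring is full */
static int next_write_slot(void)
{
	int slot = wr_idx;

	wr_idx++;
	if (wr_idx >= MAX_ENTRIES)
		wr_idx = 0;

	if (wr_idx == rd_idx) {		/* writer caught the reader: drop oldest */
		rd_idx++;
		if (rd_idx >= MAX_ENTRIES)
			rd_idx = 0;
	}
	return slot;
}

int main(void)
{
	int i;

	for (i = 0; i < 20; i++)
		printf("write %2d -> slot %d (rd_idx now %d)\n",
		       i, next_write_slot(), rd_idx);
	return 0;
}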
|
||||
|
||||
/*
 * fnic_fc_ctlr_get_trace_data: Copy trace buffer to a memory file
 * Passed parameter:
 * @fnic_dbgfs_t: pointer to debugfs trace buffer
 * rdata_flag: 1 => Unformatted file
 *             0 => Formatted file
 * Description:
 * This routine will copy the trace data to a memory file with
 * proper formatting and also copy it to another memory
 * file without formatting for further processing.
 * Return Value:
 * Number of bytes that were dumped into fnic_dbgfs_t
 */
|
||||
int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag)
|
||||
{
|
||||
int rd_idx, wr_idx;
|
||||
unsigned long flags;
|
||||
int len = 0, j;
|
||||
struct fc_trace_hdr *tdata;
|
||||
char *fc_trace;
|
||||
|
||||
spin_lock_irqsave(&fnic_fc_trace_lock, flags);
|
||||
if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) {
|
||||
spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
|
||||
pr_info("fnic: Buffer is empty\n");
|
||||
return 0;
|
||||
}
|
||||
rd_idx = fc_trace_entries.rd_idx;
|
||||
wr_idx = fc_trace_entries.wr_idx;
|
||||
if (rdata_flag == 0) {
|
||||
len += snprintf(fnic_dbgfs_prt->buffer + len,
|
||||
(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
|
||||
"Time Stamp (UTC)\t\t"
|
||||
"Host No: F Type: len: FCoE_FRAME:\n");
|
||||
}
|
||||
|
||||
while (rd_idx != wr_idx) {
|
||||
tdata = (struct fc_trace_hdr *)
|
||||
fc_trace_entries.page_offset[rd_idx];
|
||||
if (!tdata) {
|
||||
pr_info("fnic: Rd data is NULL\n");
|
||||
spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
|
||||
return 0;
|
||||
}
|
||||
if (rdata_flag == 0) {
|
||||
copy_and_format_trace_data(tdata,
|
||||
fnic_dbgfs_prt, &len, rdata_flag);
|
||||
} else {
|
||||
fc_trace = (char *)tdata;
|
||||
for (j = 0; j < FC_TRC_SIZE_BYTES; j++) {
|
||||
len += snprintf(fnic_dbgfs_prt->buffer + len,
|
||||
(fnic_fc_trace_max_pages * PAGE_SIZE * 3)
|
||||
- len, "%02x", fc_trace[j] & 0xff);
|
||||
} /* for loop */
|
||||
len += snprintf(fnic_dbgfs_prt->buffer + len,
|
||||
(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
|
||||
"\n");
|
||||
}
|
||||
rd_idx++;
|
||||
if (rd_idx > (fc_trace_max_entries - 1))
|
||||
rd_idx = 0;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
|
||||
return len;
|
||||
}
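The rdata_flag == 1 branch above dumps each 256-byte slot as a plain hex string for post-processing. A short user-space sketch of that same "%02x per byte" formatting, assuming a 256-byte slot; the helper name and sample data are made up for illustration:

#include <stdio.h>

#define TRC_SIZE 256			/* mirrors FC_TRC_SIZE_BYTES */

/* dump one trace slot as hex pairs, the way the raw (rdata) branch does */
static int hexdump_entry(const unsigned char *slot, char *out, int outlen)
{
	int j, len = 0;

	for (j = 0; j < TRC_SIZE && len < outlen; j++)
		len += snprintf(out + len, outlen - len, "%02x", slot[j]);
	if (len < outlen)
		len += snprintf(out + len, outlen - len, "\n");
	return len;
}

int main(void)
{
	unsigned char slot[TRC_SIZE] = { 0x52, 0x00, 0xde, 0xad };
	char out[2 * TRC_SIZE + 2];

	hexdump_entry(slot, out, sizeof(out));
	fputs(out, stdout);
	return 0;
}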
|
||||
|
||||
/*
|
||||
* copy_and_format_trace_data: Copy formatted data to char * buffer
|
||||
* Passed Parameter:
|
||||
* @fc_trace_hdr_t: pointer to trace data
|
||||
* @fnic_dbgfs_t: pointer to debugfs trace buffer
|
||||
* @orig_len: pointer to len
|
||||
* rdata_flag: 0 => Formated file, 1 => Unformated file
|
||||
* Description:
|
||||
* This routine will format and copy the passed trace data
|
||||
* for formated file or unformated file accordingly.
|
||||
*/
|
||||
|
||||
void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
|
||||
fnic_dbgfs_t *fnic_dbgfs_prt, int *orig_len,
|
||||
u8 rdata_flag)
|
||||
{
|
||||
struct tm tm;
|
||||
int j, i = 1, len;
|
||||
char *fc_trace, *fmt;
|
||||
int ethhdr_len = sizeof(struct ethhdr) - 1;
|
||||
int fcoehdr_len = sizeof(struct fcoe_hdr);
|
||||
int fchdr_len = sizeof(struct fc_frame_header);
|
||||
int max_size = fnic_fc_trace_max_pages * PAGE_SIZE * 3;
|
||||
|
||||
tdata->frame_type = tdata->frame_type & 0x7F;
|
||||
|
||||
len = *orig_len;
|
||||
|
||||
time_to_tm(tdata->time_stamp.tv_sec, 0, &tm);
|
||||
|
||||
fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t";
|
||||
len += snprintf(fnic_dbgfs_prt->buffer + len,
|
||||
(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
|
||||
fmt,
|
||||
tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900,
|
||||
tm.tm_hour, tm.tm_min, tm.tm_sec,
|
||||
tdata->time_stamp.tv_nsec, tdata->host_no,
|
||||
tdata->frame_type, tdata->frame_len);
|
||||
|
||||
fc_trace = (char *)FC_TRACE_ADDRESS(tdata);
|
||||
|
||||
for (j = 0; j < min_t(u8, tdata->frame_len,
|
||||
(u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)); j++) {
|
||||
if (tdata->frame_type == FNIC_FC_LE) {
|
||||
len += snprintf(fnic_dbgfs_prt->buffer + len,
|
||||
max_size - len, "%c", fc_trace[j]);
|
||||
} else {
|
||||
len += snprintf(fnic_dbgfs_prt->buffer + len,
|
||||
max_size - len, "%02x", fc_trace[j] & 0xff);
|
||||
len += snprintf(fnic_dbgfs_prt->buffer + len,
|
||||
max_size - len, " ");
|
||||
if (j == ethhdr_len ||
|
||||
j == ethhdr_len + fcoehdr_len ||
|
||||
j == ethhdr_len + fcoehdr_len + fchdr_len ||
|
||||
(i > 3 && j%fchdr_len == 0)) {
|
||||
len += snprintf(fnic_dbgfs_prt->buffer
|
||||
+ len, (fnic_fc_trace_max_pages
|
||||
* PAGE_SIZE * 3) - len,
|
||||
"\n\t\t\t\t\t\t\t\t");
|
||||
i++;
|
||||
}
|
||||
} /* end of else*/
|
||||
} /* End of for loop*/
|
||||
len += snprintf(fnic_dbgfs_prt->buffer + len,
|
||||
max_size - len, "\n");
|
||||
*orig_len = len;
|
||||
}
|
||||
|
@ -19,6 +19,17 @@
|
||||
#define __FNIC_TRACE_H__
|
||||
|
||||
#define FNIC_ENTRY_SIZE_BYTES 64
|
||||
#define FC_TRC_SIZE_BYTES 256
|
||||
#define FC_TRC_HEADER_SIZE sizeof(struct fc_trace_hdr)
|
||||
|
||||
/*
 * First bit of FNIC_FC_RECV and FNIC_FC_SEND is used to represent the type
 * of frame: 1 => Eth frame, 0 => FC frame
 */
|
||||
#define FNIC_FC_RECV 0x52 /* Character R */
|
||||
#define FNIC_FC_SEND 0x54 /* Character T */
|
||||
#define FNIC_FC_LE 0x4C /* Character L */
|
||||
|
||||
extern ssize_t simple_read_from_buffer(void __user *to,
|
||||
size_t count,
|
||||
@ -30,6 +41,10 @@ extern unsigned int fnic_trace_max_pages;
|
||||
extern int fnic_tracing_enabled;
|
||||
extern unsigned int trace_max_pages;
|
||||
|
||||
extern unsigned int fnic_fc_trace_max_pages;
|
||||
extern int fnic_fc_tracing_enabled;
|
||||
extern int fnic_fc_trace_cleared;
|
||||
|
||||
typedef struct fnic_trace_dbg {
|
||||
int wr_idx;
|
||||
int rd_idx;
|
||||
@ -56,6 +71,16 @@ struct fnic_trace_data {
|
||||
|
||||
typedef struct fnic_trace_data fnic_trace_data_t;
|
||||
|
||||
struct fc_trace_hdr {
|
||||
struct timespec time_stamp;
|
||||
u32 host_no;
|
||||
u8 frame_type;
|
||||
u8 frame_len;
|
||||
} __attribute__((__packed__));
|
||||
|
||||
#define FC_TRACE_ADDRESS(a) \
|
||||
((unsigned long)(a) + sizeof(struct fc_trace_hdr))
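Each 256-byte slot therefore holds a packed header followed by the captured frame bytes, and FC_TRACE_ADDRESS() is just header-sized pointer arithmetic. A small user-space illustration of that layout; the struct is a simplified stand-in for fc_trace_hdr (the timestamp field in particular is not struct timespec here), and the 0x80 bit follows the "Eth frame" convention documented above:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define TRC_SIZE 256				/* FC_TRC_SIZE_BYTES */

struct trace_hdr {				/* simplified stand-in for fc_trace_hdr */
	uint64_t tv_sec;
	uint64_t tv_nsec;
	uint32_t host_no;
	uint8_t  frame_type;
	uint8_t  frame_len;
} __attribute__((__packed__));

/* same idea as FC_TRACE_ADDRESS(): payload starts right after the header */
#define TRACE_PAYLOAD(a) ((unsigned char *)(a) + sizeof(struct trace_hdr))

int main(void)
{
	unsigned char slot[TRC_SIZE] = { 0 };
	struct trace_hdr *hdr = (struct trace_hdr *)slot;

	hdr->host_no = 0;
	hdr->frame_type = 0x52 | 0x80;		/* 'R' receive, high bit => Ethernet framed */
	hdr->frame_len = 60;
	memset(TRACE_PAYLOAD(slot), 0xff, hdr->frame_len);

	printf("header %zu bytes, payload room %zu bytes\n",
	       sizeof(*hdr), TRC_SIZE - sizeof(*hdr));
	return 0;
}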
|
||||
|
||||
#define FNIC_TRACE_ENTRY_SIZE \
|
||||
(FNIC_ENTRY_SIZE_BYTES - sizeof(fnic_trace_data_t))
|
||||
|
||||
@ -88,4 +113,17 @@ int fnic_debugfs_init(void);
|
||||
void fnic_debugfs_terminate(void);
|
||||
int fnic_trace_debugfs_init(void);
|
||||
void fnic_trace_debugfs_terminate(void);
|
||||
|
||||
/* Fnic FC CTLR Trace related functions */
int fnic_fc_trace_init(void);
|
||||
void fnic_fc_trace_free(void);
|
||||
int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
|
||||
char *frame, u32 fc_frame_len);
|
||||
int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag);
|
||||
void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
|
||||
fnic_dbgfs_t *fnic_dbgfs_prt,
|
||||
int *len, u8 rdata_flag);
|
||||
int fnic_fc_trace_debugfs_init(void);
|
||||
void fnic_fc_trace_debugfs_terminate(void);
|
||||
|
||||
#endif
|
||||
|
@ -78,10 +78,6 @@
|
||||
*
|
||||
*/
|
||||
|
||||
/*
|
||||
* $Log: generic_NCR5380.c,v $
|
||||
*/
|
||||
|
||||
/* settings for DTC3181E card with only Mustek scanner attached */
|
||||
#define USLEEP
|
||||
#define USLEEP_POLL 1
|
||||
|
@ -25,10 +25,6 @@
|
||||
* 1+ (800) 334-5454
|
||||
*/
|
||||
|
||||
/*
|
||||
* $Log: generic_NCR5380.h,v $
|
||||
*/
|
||||
|
||||
#ifndef GENERIC_NCR5380_H
|
||||
#define GENERIC_NCR5380_H
|
||||
|
||||
@ -58,8 +54,6 @@ static const char* generic_NCR5380_info(struct Scsi_Host *);
|
||||
#define CAN_QUEUE 16
|
||||
#endif
|
||||
|
||||
#ifndef HOSTS_C
|
||||
|
||||
#define __STRVAL(x) #x
|
||||
#define STRVAL(x) __STRVAL(x)
|
||||
|
||||
@ -131,7 +125,6 @@ static const char* generic_NCR5380_info(struct Scsi_Host *);
|
||||
#define BOARD_NCR53C400A 2
|
||||
#define BOARD_DTC3181E 3
|
||||
|
||||
#endif /* else def HOSTS_C */
|
||||
#endif /* ndef ASM */
|
||||
#endif /* GENERIC_NCR5380_H */
|
||||
|
||||
|
@ -115,9 +115,15 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
|
||||
{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
|
||||
{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
|
||||
{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
|
||||
@ -165,9 +171,15 @@ static struct board_type products[] = {
|
||||
{0x21C3103C, "Smart Array", &SA5_access},
|
||||
{0x21C4103C, "Smart Array", &SA5_access},
|
||||
{0x21C5103C, "Smart Array", &SA5_access},
|
||||
{0x21C6103C, "Smart Array", &SA5_access},
|
||||
{0x21C7103C, "Smart Array", &SA5_access},
|
||||
{0x21C8103C, "Smart Array", &SA5_access},
|
||||
{0x21C9103C, "Smart Array", &SA5_access},
|
||||
{0x21CA103C, "Smart Array", &SA5_access},
|
||||
{0x21CB103C, "Smart Array", &SA5_access},
|
||||
{0x21CC103C, "Smart Array", &SA5_access},
|
||||
{0x21CD103C, "Smart Array", &SA5_access},
|
||||
{0x21CE103C, "Smart Array", &SA5_access},
|
||||
{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
|
||||
{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
|
||||
{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
|
||||
@ -2836,6 +2848,8 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
|
||||
|
||||
/* Get the list of physical devices */
|
||||
physicals = kzalloc(reportsize, GFP_KERNEL);
|
||||
if (physicals == NULL)
|
||||
return 0;
|
||||
if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
|
||||
reportsize, extended)) {
|
||||
dev_err(&h->pdev->dev,
|
||||
@ -2963,19 +2977,24 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
|
||||
static int hpsa_hba_mode_enabled(struct ctlr_info *h)
|
||||
{
|
||||
int rc;
|
||||
int hba_mode_enabled;
|
||||
struct bmic_controller_parameters *ctlr_params;
|
||||
ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
|
||||
GFP_KERNEL);
|
||||
|
||||
if (!ctlr_params)
|
||||
return 0;
|
||||
return -ENOMEM;
|
||||
rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
|
||||
sizeof(struct bmic_controller_parameters));
|
||||
if (rc != 0) {
|
||||
if (rc) {
|
||||
kfree(ctlr_params);
|
||||
return 0;
|
||||
return rc;
|
||||
}
|
||||
return ctlr_params->nvram_flags & (1 << 3) ? 1 : 0;
|
||||
|
||||
hba_mode_enabled =
|
||||
((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
|
||||
kfree(ctlr_params);
|
||||
return hba_mode_enabled;
|
||||
}
|
||||
|
||||
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
|
||||
@ -3001,7 +3020,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
|
||||
int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24;
|
||||
int i, n_ext_target_devs, ndevs_to_allocate;
|
||||
int raid_ctlr_position;
|
||||
u8 rescan_hba_mode;
|
||||
int rescan_hba_mode;
|
||||
DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
|
||||
|
||||
currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
|
||||
@ -3016,6 +3035,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
|
||||
memset(lunzerobits, 0, sizeof(lunzerobits));
|
||||
|
||||
rescan_hba_mode = hpsa_hba_mode_enabled(h);
|
||||
if (rescan_hba_mode < 0)
|
||||
goto out;
|
||||
|
||||
if (!h->hba_mode_enabled && rescan_hba_mode)
|
||||
dev_warn(&h->pdev->dev, "HBA mode enabled\n");
|
||||
|
@ -90,6 +90,7 @@ struct bmic_controller_parameters {
|
||||
u8 automatic_drive_slamming;
|
||||
u8 reserved1;
|
||||
u8 nvram_flags;
|
||||
#define HBA_MODE_ENABLED_FLAG (1 << 3)
|
||||
u8 cache_nvram_flags;
|
||||
u8 drive_config_flags;
|
||||
u16 reserved2;
|
||||
|
@ -1442,9 +1442,9 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
|
||||
conn->task = NULL;
|
||||
}
|
||||
/* regular RX path uses back_lock */
|
||||
spin_lock_bh(&conn->session->back_lock);
|
||||
spin_lock(&conn->session->back_lock);
|
||||
__iscsi_put_task(task);
|
||||
spin_unlock_bh(&conn->session->back_lock);
|
||||
spin_unlock(&conn->session->back_lock);
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -265,6 +265,16 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
|
||||
return NULL;
|
||||
|
||||
q->hba_index = idx;
|
||||
|
||||
/*
|
||||
* insert barrier for instruction interlock : data from the hardware
|
||||
* must have the valid bit checked before it can be copied and acted
|
||||
* upon. Given what was seen in lpfc_sli4_cq_get() of speculative
|
||||
* instructions allowing action on content before valid bit checked,
|
||||
* add barrier here as well. May not be needed as "content" is a
|
||||
* single 32-bit entity here (vs multi word structure for cq's).
|
||||
*/
|
||||
mb();
|
||||
return eqe;
|
||||
}
|
||||
|
||||
@ -370,6 +380,17 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
|
||||
|
||||
cqe = q->qe[q->hba_index].cqe;
|
||||
q->hba_index = idx;
|
||||
|
||||
/*
|
||||
* insert barrier for instruction interlock : data from the hardware
|
||||
* must have the valid bit checked before it can be copied and acted
|
||||
* upon. Speculative instructions were allowing a bcopy at the start
|
||||
* of lpfc_sli4_fp_handle_wcqe(), which is called immediately
|
||||
* after our return, to copy data before the valid bit check above
|
||||
* was done. As such, some of the copied data was stale. The barrier
|
||||
* ensures the check is before any data is copied.
|
||||
*/
|
||||
mb();
|
||||
return cqe;
|
||||
}
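Both lpfc hunks make the same point: the CPU must not be allowed to read the queue-entry contents before the valid bit has actually been tested, so a barrier sits between the check and the copy. A hedged, generic sketch of that consume pattern in user space, where an acquire load stands in for the driver's mb(); the structure and names are illustrative, not lpfc code:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct qentry {
	uint32_t valid;			/* written by the producer last */
	unsigned char payload[60];
};

/*
 * Consume one entry: the acquire load on 'valid' takes the role of the
 * barrier in the driver, forbidding the payload reads from being hoisted
 * above the valid-bit check, so a half-written (stale) entry is never copied.
 */
static int consume(volatile struct qentry *e, unsigned char *out)
{
	if (!__atomic_load_n(&e->valid, __ATOMIC_ACQUIRE))
		return 0;			/* nothing to do yet */

	memcpy(out, (const void *)e->payload, sizeof(e->payload));
	return 1;
}

int main(void)
{
	static struct qentry e;
	unsigned char buf[sizeof(e.payload)];

	memset(e.payload, 0xab, sizeof(e.payload));
	__atomic_store_n(&e.valid, 1, __ATOMIC_RELEASE);	/* producer side */

	printf("consumed: %d\n", consume(&e, buf));
	return 0;
}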
|
||||
|
||||
|
@ -25,10 +25,6 @@
|
||||
* 1+ (800) 334-5454
|
||||
*/
|
||||
|
||||
/*
|
||||
* $Log: mac_NCR5380.c,v $
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/ctype.h>
|
||||
@ -58,12 +54,6 @@
|
||||
|
||||
#include "NCR5380.h"
|
||||
|
||||
#if 0
|
||||
#define NDEBUG (NDEBUG_INTR | NDEBUG_PSEUDO_DMA | NDEBUG_ARBITRATION | NDEBUG_SELECTION | NDEBUG_RESELECTION)
|
||||
#else
|
||||
#define NDEBUG (NDEBUG_ABORT)
|
||||
#endif
|
||||
|
||||
#define RESET_BOOT
|
||||
#define DRIVER_SETUP
|
||||
|
||||
|
@ -22,10 +22,6 @@
|
||||
* 1+ (800) 334-5454
|
||||
*/
|
||||
|
||||
/*
|
||||
* $Log: cumana_NCR5380.h,v $
|
||||
*/
|
||||
|
||||
#ifndef MAC_NCR5380_H
|
||||
#define MAC_NCR5380_H
|
||||
|
||||
@ -51,8 +47,6 @@
|
||||
|
||||
#include <scsi/scsicam.h>
|
||||
|
||||
#ifndef HOSTS_C
|
||||
|
||||
#define NCR5380_implementation_fields \
|
||||
int port, ctrl
|
||||
|
||||
@ -75,10 +69,6 @@
|
||||
#define NCR5380_show_info macscsi_show_info
|
||||
#define NCR5380_write_info macscsi_write_info
|
||||
|
||||
#define BOARD_NORMAL 0
|
||||
#define BOARD_NCR53C400 1
|
||||
|
||||
#endif /* ndef HOSTS_C */
|
||||
#endif /* ndef ASM */
|
||||
#endif /* MAC_NCR5380_H */
|
||||
|
||||
|
@ -3061,7 +3061,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
|
||||
u32 cur_state;
|
||||
u32 abs_state, curr_abs_state;
|
||||
|
||||
fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
|
||||
abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
|
||||
fw_state = abs_state & MFI_STATE_MASK;
|
||||
|
||||
if (fw_state != MFI_STATE_READY)
|
||||
printk(KERN_INFO "megasas: Waiting for FW to come to ready"
|
||||
@ -3069,9 +3070,6 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
|
||||
|
||||
while (fw_state != MFI_STATE_READY) {
|
||||
|
||||
abs_state =
|
||||
instance->instancet->read_fw_status_reg(instance->reg_set);
|
||||
|
||||
switch (fw_state) {
|
||||
|
||||
case MFI_STATE_FAULT:
|
||||
@ -3223,10 +3221,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
|
||||
* The cur_state should not last for more than max_wait secs
|
||||
*/
|
||||
for (i = 0; i < (max_wait * 1000); i++) {
|
||||
fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) &
|
||||
MFI_STATE_MASK ;
|
||||
curr_abs_state =
|
||||
instance->instancet->read_fw_status_reg(instance->reg_set);
|
||||
curr_abs_state = instance->instancet->
|
||||
read_fw_status_reg(instance->reg_set);
|
||||
|
||||
if (abs_state == curr_abs_state) {
|
||||
msleep(1);
|
||||
@ -3242,6 +3238,9 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
|
||||
"in %d secs\n", fw_state, max_wait);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
abs_state = curr_abs_state;
|
||||
fw_state = curr_abs_state & MFI_STATE_MASK;
|
||||
}
|
||||
printk(KERN_INFO "megasas: FW now in Ready state\n");
|
||||
|
||||
|
@ -1739,14 +1739,14 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
|
||||
list_for_each_entry_safe(chain_req, next,
|
||||
&ioc->scsi_lookup[i].chain_list, tracker_list) {
|
||||
list_del_init(&chain_req->tracker_list);
|
||||
list_add_tail(&chain_req->tracker_list,
|
||||
list_add(&chain_req->tracker_list,
|
||||
&ioc->free_chain_list);
|
||||
}
|
||||
}
|
||||
ioc->scsi_lookup[i].cb_idx = 0xFF;
|
||||
ioc->scsi_lookup[i].scmd = NULL;
|
||||
ioc->scsi_lookup[i].direct_io = 0;
|
||||
list_add_tail(&ioc->scsi_lookup[i].tracker_list,
|
||||
list_add(&ioc->scsi_lookup[i].tracker_list,
|
||||
&ioc->free_list);
|
||||
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
|
||||
|
||||
@ -1764,13 +1764,13 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
|
||||
/* hi-priority */
|
||||
i = smid - ioc->hi_priority_smid;
|
||||
ioc->hpr_lookup[i].cb_idx = 0xFF;
|
||||
list_add_tail(&ioc->hpr_lookup[i].tracker_list,
|
||||
list_add(&ioc->hpr_lookup[i].tracker_list,
|
||||
&ioc->hpr_free_list);
|
||||
} else if (smid <= ioc->hba_queue_depth) {
|
||||
/* internal queue */
|
||||
i = smid - ioc->internal_smid;
|
||||
ioc->internal_lookup[i].cb_idx = 0xFF;
|
||||
list_add_tail(&ioc->internal_lookup[i].tracker_list,
|
||||
list_add(&ioc->internal_lookup[i].tracker_list,
|
||||
&ioc->internal_free_list);
|
||||
}
|
||||
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
|
||||
|
@ -1065,7 +1065,7 @@ void mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
|
||||
u32 reply);
|
||||
int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle,
|
||||
uint channel, uint id, uint lun, u8 type, u16 smid_task,
|
||||
ulong timeout, unsigned long serial_number, enum mutex_type m_type);
|
||||
ulong timeout, enum mutex_type m_type);
|
||||
void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
|
||||
void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
|
||||
void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
|
||||
|
@ -987,7 +987,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command karg,
|
||||
mpt2sas_scsih_issue_tm(ioc,
|
||||
le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
|
||||
0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10,
|
||||
0, TM_MUTEX_ON);
|
||||
TM_MUTEX_ON);
|
||||
ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
|
||||
} else
|
||||
mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
|
||||
|
@ -2368,7 +2368,6 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
|
||||
* @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
|
||||
* @smid_task: smid assigned to the task
|
||||
* @timeout: timeout in seconds
|
||||
* @serial_number: the serial_number from scmd
|
||||
* @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
|
||||
* Context: user
|
||||
*
|
||||
@ -2381,7 +2380,7 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
|
||||
int
|
||||
mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
|
||||
uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
|
||||
unsigned long serial_number, enum mutex_type m_type)
|
||||
enum mutex_type m_type)
|
||||
{
|
||||
Mpi2SCSITaskManagementRequest_t *mpi_request;
|
||||
Mpi2SCSITaskManagementReply_t *mpi_reply;
|
||||
@ -2634,8 +2633,7 @@ _scsih_abort(struct scsi_cmnd *scmd)
|
||||
handle = sas_device_priv_data->sas_target->handle;
|
||||
r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
|
||||
scmd->device->id, scmd->device->lun,
|
||||
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
|
||||
scmd->serial_number, TM_MUTEX_ON);
|
||||
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON);
|
||||
|
||||
out:
|
||||
sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
|
||||
@ -2696,8 +2694,7 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
|
||||
|
||||
r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
|
||||
scmd->device->id, scmd->device->lun,
|
||||
MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0,
|
||||
TM_MUTEX_ON);
|
||||
MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON);
|
||||
|
||||
out:
|
||||
sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
|
||||
@ -2757,7 +2754,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
|
||||
|
||||
r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
|
||||
scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
|
||||
30, 0, TM_MUTEX_ON);
|
||||
30, TM_MUTEX_ON);
|
||||
|
||||
out:
|
||||
starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
|
||||
@ -3953,9 +3950,9 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
|
||||
* SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
|
||||
*/
|
||||
static int
|
||||
_scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
|
||||
_scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
|
||||
{
|
||||
struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
|
||||
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
|
||||
struct MPT2SAS_DEVICE *sas_device_priv_data;
|
||||
struct MPT2SAS_TARGET *sas_target_priv_data;
|
||||
struct _raid_device *raid_device;
|
||||
@ -3963,7 +3960,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
|
||||
u32 mpi_control;
|
||||
u16 smid;
|
||||
|
||||
scmd->scsi_done = done;
|
||||
sas_device_priv_data = scmd->device->hostdata;
|
||||
if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
|
||||
scmd->result = DID_NO_CONNECT << 16;
|
||||
@ -4039,7 +4035,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
|
||||
MPT_TARGET_FLAGS_RAID_COMPONENT)
|
||||
mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
|
||||
else
|
||||
mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
|
||||
mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
|
||||
mpi_request->DevHandle =
|
||||
cpu_to_le16(sas_device_priv_data->sas_target->handle);
|
||||
mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
|
||||
@ -4083,8 +4079,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
|
||||
return SCSI_MLQUEUE_HOST_BUSY;
|
||||
}
|
||||
|
||||
static DEF_SCSI_QCMD(_scsih_qcmd)
|
||||
|
||||
/**
|
||||
* _scsih_normalize_sense - normalize descriptor and fixed format sense data
|
||||
* @sense_buffer: sense data returned by target
|
||||
@ -5880,7 +5874,7 @@ broadcast_aen_retry:
|
||||
|
||||
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
|
||||
r = mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
|
||||
MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0,
|
||||
MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30,
|
||||
TM_MUTEX_OFF);
|
||||
if (r == FAILED) {
|
||||
sdev_printk(KERN_WARNING, sdev,
|
||||
@ -5922,7 +5916,7 @@ broadcast_aen_retry:
|
||||
|
||||
r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
|
||||
sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
|
||||
scmd->serial_number, TM_MUTEX_OFF);
|
||||
TM_MUTEX_OFF);
|
||||
if (r == FAILED) {
|
||||
sdev_printk(KERN_WARNING, sdev,
|
||||
"mpt2sas_scsih_issue_tm: ABORT_TASK: FAILED : "
|
||||
|
@ -993,7 +993,7 @@ void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
|
||||
|
||||
int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
|
||||
uint channel, uint id, uint lun, u8 type, u16 smid_task,
|
||||
ulong timeout, unsigned long serial_number, enum mutex_type m_type);
|
||||
ulong timeout, enum mutex_type m_type);
|
||||
void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
|
||||
void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
|
||||
void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
|
||||
|
@ -980,7 +980,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
|
||||
mpt3sas_scsih_issue_tm(ioc,
|
||||
le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
|
||||
0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30,
|
||||
0, TM_MUTEX_ON);
|
||||
TM_MUTEX_ON);
|
||||
} else
|
||||
mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
|
||||
FORCE_BIG_HAMMER);
|
||||
|
@ -2029,7 +2029,6 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
|
||||
* @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
|
||||
* @smid_task: smid assigned to the task
|
||||
* @timeout: timeout in seconds
|
||||
* @serial_number: the serial_number from scmd
|
||||
* @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
|
||||
* Context: user
|
||||
*
|
||||
@ -2042,7 +2041,7 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
|
||||
int
|
||||
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
|
||||
uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
|
||||
unsigned long serial_number, enum mutex_type m_type)
|
||||
enum mutex_type m_type)
|
||||
{
|
||||
Mpi2SCSITaskManagementRequest_t *mpi_request;
|
||||
Mpi2SCSITaskManagementReply_t *mpi_reply;
|
||||
@ -2293,8 +2292,7 @@ _scsih_abort(struct scsi_cmnd *scmd)
|
||||
handle = sas_device_priv_data->sas_target->handle;
|
||||
r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
|
||||
scmd->device->id, scmd->device->lun,
|
||||
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
|
||||
scmd->serial_number, TM_MUTEX_ON);
|
||||
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON);
|
||||
|
||||
out:
|
||||
sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
|
||||
@ -2353,8 +2351,7 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
|
||||
|
||||
r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
|
||||
scmd->device->id, scmd->device->lun,
|
||||
MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0,
|
||||
TM_MUTEX_ON);
|
||||
MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON);
|
||||
|
||||
out:
|
||||
sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
|
||||
@ -2414,7 +2411,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
|
||||
|
||||
r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
|
||||
scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
|
||||
30, 0, TM_MUTEX_ON);
|
||||
30, TM_MUTEX_ON);
|
||||
|
||||
out:
|
||||
starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
|
||||
@ -3518,7 +3515,7 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
|
||||
|
||||
|
||||
/**
|
||||
* _scsih_qcmd_lck - main scsi request entry point
|
||||
* _scsih_qcmd - main scsi request entry point
|
||||
* @scmd: pointer to scsi command object
|
||||
* @done: function pointer to be invoked on completion
|
||||
*
|
||||
@ -3529,9 +3526,9 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
|
||||
* SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
|
||||
*/
|
||||
static int
|
||||
_scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
|
||||
_scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
|
||||
{
|
||||
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
|
||||
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
|
||||
struct MPT3SAS_DEVICE *sas_device_priv_data;
|
||||
struct MPT3SAS_TARGET *sas_target_priv_data;
|
||||
Mpi2SCSIIORequest_t *mpi_request;
|
||||
@ -3544,7 +3541,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
|
||||
scsi_print_command(scmd);
|
||||
#endif
|
||||
|
||||
scmd->scsi_done = done;
|
||||
sas_device_priv_data = scmd->device->hostdata;
|
||||
if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
|
||||
scmd->result = DID_NO_CONNECT << 16;
|
||||
@ -3659,8 +3655,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
|
||||
out:
|
||||
return SCSI_MLQUEUE_HOST_BUSY;
|
||||
}
|
||||
static DEF_SCSI_QCMD(_scsih_qcmd)
|
||||
|
||||
|
||||
/**
|
||||
* _scsih_normalize_sense - normalize descriptor and fixed format sense data
|
||||
@ -5425,7 +5419,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
|
||||
|
||||
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
|
||||
r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
|
||||
MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0,
|
||||
MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30,
|
||||
TM_MUTEX_OFF);
|
||||
if (r == FAILED) {
|
||||
sdev_printk(KERN_WARNING, sdev,
|
||||
@ -5467,7 +5461,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
|
||||
|
||||
r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
|
||||
sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
|
||||
scmd->serial_number, TM_MUTEX_OFF);
|
||||
TM_MUTEX_OFF);
|
||||
if (r == FAILED) {
|
||||
sdev_printk(KERN_WARNING, sdev,
|
||||
"mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
|
||||
|
@ -728,6 +728,15 @@ static struct pci_device_id mvs_pci_table[] = {
|
||||
.class_mask = 0,
|
||||
.driver_data = chip_9485,
|
||||
},
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_MARVELL_EXT,
|
||||
.device = 0x9485,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = 0x9485,
|
||||
.class = 0,
|
||||
.class_mask = 0,
|
||||
.driver_data = chip_9485,
|
||||
},
|
||||
{ PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */
|
||||
{ PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
|
||||
{ PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
|
||||
|
@ -129,8 +129,6 @@ static int pas16_bus_reset(Scsi_Cmnd *);
|
||||
#define CAN_QUEUE 32
|
||||
#endif
|
||||
|
||||
#ifndef HOSTS_C
|
||||
|
||||
#define NCR5380_implementation_fields \
|
||||
volatile unsigned short io_port
|
||||
|
||||
@ -171,6 +169,5 @@ static int pas16_bus_reset(Scsi_Cmnd *);
|
||||
|
||||
#define PAS16_IRQS 0xd4a8
|
||||
|
||||
#endif /* else def HOSTS_C */
|
||||
#endif /* ndef ASM */
|
||||
#endif /* PAS16_H */
|
||||
|
@ -395,6 +395,8 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
|
||||
payload.offset = 0;
|
||||
payload.length = 4096;
|
||||
payload.func_specific = kzalloc(4096, GFP_KERNEL);
|
||||
if (!payload.func_specific)
|
||||
return -ENOMEM;
|
||||
PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
|
||||
wait_for_completion(&completion);
|
||||
virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
|
||||
@ -402,6 +404,7 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
|
||||
bios_index++)
|
||||
str += sprintf(str, "%c",
|
||||
*((u8 *)((u8 *)virt_addr+bios_index)));
|
||||
kfree(payload.func_specific);
|
||||
return str - buf;
|
||||
}
|
||||
static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL);
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
@ -664,7 +664,7 @@ do_read:
|
||||
}
|
||||
|
||||
rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
|
||||
addr, offset, SFP_BLOCK_SIZE, 0);
|
||||
addr, offset, SFP_BLOCK_SIZE, BIT_1);
|
||||
if (rval != QLA_SUCCESS) {
|
||||
ql_log(ql_log_warn, vha, 0x706d,
|
||||
"Unable to read SFP data (%x/%x/%x).\n", rval,
|
||||
@ -1495,7 +1495,7 @@ qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
|
||||
|
||||
if (!ha->fw_dumped)
|
||||
size = 0;
|
||||
else if (IS_QLA82XX(ha))
|
||||
else if (IS_P3P_TYPE(ha))
|
||||
size = ha->md_template_size + ha->md_dump_size;
|
||||
else
|
||||
size = ha->fw_dump_len;
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2012 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
@ -2054,9 +2054,49 @@ qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
|
||||
bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
|
||||
break;
|
||||
default:
|
||||
ql_log(ql_log_warn, vha, 0x708c,
|
||||
ql_dbg(ql_dbg_user, vha, 0x708c,
|
||||
"Unknown serdes cmd %x.\n", sr.cmd);
|
||||
rval = -EDOM;
|
||||
rval = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
|
||||
rval ? EXT_STATUS_MAILBOX : 0;
|
||||
|
||||
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
|
||||
bsg_job->reply->result = DID_OK << 16;
|
||||
bsg_job->job_done(bsg_job);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
qla8044_serdes_op(struct fc_bsg_job *bsg_job)
|
||||
{
|
||||
struct Scsi_Host *host = bsg_job->shost;
|
||||
scsi_qla_host_t *vha = shost_priv(host);
|
||||
int rval = 0;
|
||||
struct qla_serdes_reg_ex sr;
|
||||
|
||||
memset(&sr, 0, sizeof(sr));
|
||||
|
||||
sg_copy_to_buffer(bsg_job->request_payload.sg_list,
|
||||
bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
|
||||
|
||||
switch (sr.cmd) {
|
||||
case INT_SC_SERDES_WRITE_REG:
|
||||
rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
|
||||
bsg_job->reply->reply_payload_rcv_len = 0;
|
||||
break;
|
||||
case INT_SC_SERDES_READ_REG:
|
||||
rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
|
||||
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
|
||||
bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
|
||||
bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
|
||||
break;
|
||||
default:
|
||||
ql_dbg(ql_dbg_user, vha, 0x70cf,
|
||||
"Unknown serdes cmd %x.\n", sr.cmd);
|
||||
rval = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
@ -2121,6 +2161,9 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
|
||||
case QL_VND_SERDES_OP:
|
||||
return qla26xx_serdes_op(bsg_job);
|
||||
|
||||
case QL_VND_SERDES_OP_EX:
|
||||
return qla8044_serdes_op(bsg_job);
|
||||
|
||||
default:
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
@ -24,6 +24,7 @@
|
||||
#define QL_VND_READ_I2C 0x11
|
||||
#define QL_VND_FX00_MGMT_CMD 0x12
|
||||
#define QL_VND_SERDES_OP 0x13
|
||||
#define QL_VND_SERDES_OP_EX 0x14
|
||||
|
||||
/* BSG Vendor specific subcode returns */
|
||||
#define EXT_STATUS_OK 0
|
||||
@ -225,4 +226,10 @@ struct qla_serdes_reg {
|
||||
uint16_t val;
|
||||
} __packed;
|
||||
|
||||
struct qla_serdes_reg_ex {
|
||||
uint16_t cmd;
|
||||
uint32_t addr;
|
||||
uint32_t val;
|
||||
} __packed;
|
||||
|
||||
#endif
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
@ -15,7 +15,7 @@
|
||||
* | | | 0x0144,0x0146 |
|
||||
* | | | 0x015b-0x0160 |
|
||||
* | | | 0x016e-0x0170 |
|
||||
* | Mailbox commands | 0x1187 | 0x1018-0x1019 |
|
||||
* | Mailbox commands | 0x118d | 0x1018-0x1019 |
|
||||
* | | | 0x10ca |
|
||||
* | | | 0x1115-0x1116 |
|
||||
* | | | 0x111a-0x111b |
|
||||
@ -45,12 +45,16 @@
|
||||
* | | | 0x70ad-0x70ae |
|
||||
* | | | 0x70d7-0x70db |
|
||||
* | | | 0x70de-0x70df |
|
||||
* | Task Management | 0x803d | 0x8025-0x8026 |
|
||||
* | | | 0x800b,0x8039 |
|
||||
* | Task Management | 0x803d | 0x8000,0x800b |
|
||||
* | | | 0x8019 |
|
||||
* | | | 0x8025,0x8026 |
|
||||
* | | | 0x8031,0x8032 |
|
||||
* | | | 0x8039,0x803c |
|
||||
* | AER/EEH | 0x9011 | |
|
||||
* | Virtual Port | 0xa007 | |
|
||||
* | ISP82XX Specific | 0xb14c | 0xb002,0xb024 |
|
||||
* | ISP82XX Specific | 0xb157 | 0xb002,0xb024 |
|
||||
* | | | 0xb09e,0xb0ae |
|
||||
* | | | 0xb0c3,0xb0c6 |
|
||||
* | | | 0xb0e0-0xb0ef |
|
||||
* | | | 0xb085,0xb0dc |
|
||||
* | | | 0xb107,0xb108 |
|
||||
@ -60,12 +64,12 @@
|
||||
* | | | 0xb13c-0xb140 |
|
||||
* | | | 0xb149 |
|
||||
* | MultiQ | 0xc00c | |
|
||||
* | Misc | 0xd2ff | 0xd017-0xd019 |
|
||||
* | Misc | 0xd212 | 0xd017-0xd019 |
|
||||
* | | | 0xd020 |
|
||||
* | | | 0xd02e-0xd0ff |
|
||||
* | | | 0xd030-0xd0ff |
|
||||
* | | | 0xd101-0xd1fe |
|
||||
* | | | 0xd212-0xd2fe |
|
||||
* | Target Mode | 0xe070 | 0xe021 |
|
||||
* | | | 0xd213-0xd2fe |
|
||||
* | Target Mode | 0xe078 | |
|
||||
* | Target Mode Management | 0xf072 | 0xf002-0xf003 |
|
||||
* | | | 0xf046-0xf049 |
|
||||
* | Target Mode Task Management | 0x1000b | |
|
||||
@ -277,9 +281,15 @@ qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
|
||||
if (rval != QLA_SUCCESS)
|
||||
return rval;
|
||||
|
||||
set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);
|
||||
|
||||
/* External Memory. */
|
||||
return qla24xx_dump_ram(ha, 0x100000, *nxt,
|
||||
rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
|
||||
ha->fw_memory_size - 0x100000 + 1, nxt);
|
||||
if (rval == QLA_SUCCESS)
|
||||
set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);
|
||||
|
||||
return rval;
|
||||
}
|
||||
|
||||
static uint32_t *
|
||||
@ -296,23 +306,15 @@ qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
|
||||
return buf;
|
||||
}
|
||||
|
||||
int
|
||||
qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
|
||||
void
|
||||
qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
|
||||
{
|
||||
int rval = QLA_SUCCESS;
|
||||
uint32_t cnt;
|
||||
|
||||
WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_PAUSE);
|
||||
for (cnt = 30000;
|
||||
((RD_REG_DWORD(®->host_status) & HSRX_RISC_PAUSED) == 0) &&
|
||||
rval == QLA_SUCCESS; cnt--) {
|
||||
if (cnt)
|
||||
udelay(100);
|
||||
else
|
||||
rval = QLA_FUNCTION_TIMEOUT;
|
||||
}
|
||||
|
||||
return rval;
|
||||
/* 100 usec delay is sufficient enough for hardware to pause RISC */
|
||||
udelay(100);
|
||||
if (RD_REG_DWORD(®->host_status) & HSRX_RISC_PAUSED)
|
||||
set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
|
||||
}
|
||||
|
||||
int
|
||||
@ -320,10 +322,14 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
|
||||
{
|
||||
int rval = QLA_SUCCESS;
|
||||
uint32_t cnt;
|
||||
uint16_t mb0, wd;
|
||||
uint16_t wd;
|
||||
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
|
||||
|
||||
/* Reset RISC. */
|
||||
/*
|
||||
* Reset RISC. The delay is dependent on system architecture.
|
||||
* Driver can proceed with the reset sequence after waiting
|
||||
* for a timeout period.
|
||||
*/
|
||||
WRT_REG_DWORD(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
|
||||
for (cnt = 0; cnt < 30000; cnt++) {
|
||||
if ((RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
|
||||
@ -331,19 +337,14 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
|
||||
|
||||
udelay(10);
|
||||
}
|
||||
if (!(RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE))
|
||||
set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
|
||||
|
||||
WRT_REG_DWORD(®->ctrl_status,
|
||||
CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
|
||||
pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
|
||||
|
||||
udelay(100);
|
||||
/* Wait for firmware to complete NVRAM accesses. */
|
||||
mb0 = (uint32_t) RD_REG_WORD(®->mailbox0);
|
||||
for (cnt = 10000 ; cnt && mb0; cnt--) {
|
||||
udelay(5);
|
||||
mb0 = (uint32_t) RD_REG_WORD(®->mailbox0);
|
||||
barrier();
|
||||
}
|
||||
|
||||
/* Wait for soft-reset to complete. */
|
||||
for (cnt = 0; cnt < 30000; cnt++) {
|
||||
@ -353,16 +354,21 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
|
||||
|
||||
udelay(10);
|
||||
}
|
||||
if (!(RD_REG_DWORD(®->ctrl_status) & CSRX_ISP_SOFT_RESET))
|
||||
set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);
|
||||
|
||||
WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_RESET);
|
||||
RD_REG_DWORD(®->hccr); /* PCI Posting. */
|
||||
|
||||
for (cnt = 30000; RD_REG_WORD(®->mailbox0) != 0 &&
|
||||
for (cnt = 10000; RD_REG_WORD(®->mailbox0) != 0 &&
|
||||
rval == QLA_SUCCESS; cnt--) {
|
||||
if (cnt)
|
||||
udelay(100);
|
||||
udelay(10);
|
||||
else
|
||||
rval = QLA_FUNCTION_TIMEOUT;
|
||||
}
|
||||
if (rval == QLA_SUCCESS)
|
||||
set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
|
||||
|
||||
return rval;
|
||||
}
|
||||
@ -659,12 +665,13 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
|
||||
|
||||
if (rval != QLA_SUCCESS) {
|
||||
ql_log(ql_log_warn, vha, 0xd000,
|
||||
"Failed to dump firmware (%x).\n", rval);
|
||||
"Failed to dump firmware (%x), dump status flags (0x%lx).\n",
|
||||
rval, ha->fw_dump_cap_flags);
|
||||
ha->fw_dumped = 0;
|
||||
} else {
|
||||
ql_log(ql_log_info, vha, 0xd001,
|
||||
"Firmware dump saved to temp buffer (%ld/%p).\n",
|
||||
vha->host_no, ha->fw_dump);
|
||||
"Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
|
||||
vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
|
||||
ha->fw_dumped = 1;
|
||||
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
|
||||
}
|
||||
@ -1053,6 +1060,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
|
||||
risc_address = ext_mem_cnt = 0;
|
||||
flags = 0;
|
||||
ha->fw_dump_cap_flags = 0;
|
||||
|
||||
if (!hardware_locked)
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
@ -1075,10 +1083,11 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
|
||||
fw->host_status = htonl(RD_REG_DWORD(®->host_status));
|
||||
|
||||
/* Pause RISC. */
|
||||
rval = qla24xx_pause_risc(reg);
|
||||
if (rval != QLA_SUCCESS)
|
||||
goto qla24xx_fw_dump_failed_0;
|
||||
/*
|
||||
* Pause RISC. No need to track timeout, as resetting the chip
|
||||
* is the right approach incase of pause timeout
|
||||
*/
|
||||
qla24xx_pause_risc(reg, ha);
|
||||
|
||||
/* Host interface registers. */
|
||||
dmp_reg = ®->flash_addr;
|
||||
@ -1302,6 +1311,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
|
||||
risc_address = ext_mem_cnt = 0;
|
||||
flags = 0;
|
||||
ha->fw_dump_cap_flags = 0;
|
||||
|
||||
if (!hardware_locked)
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
@ -1325,10 +1335,11 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
|
||||
fw->host_status = htonl(RD_REG_DWORD(®->host_status));
|
||||
|
||||
/* Pause RISC. */
|
||||
rval = qla24xx_pause_risc(reg);
|
||||
if (rval != QLA_SUCCESS)
|
||||
goto qla25xx_fw_dump_failed_0;
|
||||
/*
|
||||
* Pause RISC. No need to track timeout, as resetting the chip
|
||||
* is the right approach incase of pause timeout
|
||||
*/
|
||||
qla24xx_pause_risc(reg, ha);
|
||||
|
||||
/* Host/Risc registers. */
|
||||
iter_reg = fw->host_risc_reg;
|
||||
@ -1619,6 +1630,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
|
||||
risc_address = ext_mem_cnt = 0;
|
||||
flags = 0;
|
||||
ha->fw_dump_cap_flags = 0;
|
||||
|
||||
if (!hardware_locked)
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
@ -1641,10 +1653,11 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
|
||||
fw->host_status = htonl(RD_REG_DWORD(®->host_status));
|
||||
|
||||
/* Pause RISC. */
|
||||
rval = qla24xx_pause_risc(reg);
|
||||
if (rval != QLA_SUCCESS)
|
||||
goto qla81xx_fw_dump_failed_0;
|
||||
/*
|
||||
* Pause RISC. No need to track timeout, as resetting the chip
|
||||
* is the right approach incase of pause timeout
|
||||
*/
|
||||
qla24xx_pause_risc(reg, ha);
|
||||
|
||||
/* Host/Risc registers. */
|
||||
iter_reg = fw->host_risc_reg;
|
||||
@ -1938,6 +1951,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
|
||||
risc_address = ext_mem_cnt = 0;
|
||||
flags = 0;
|
||||
ha->fw_dump_cap_flags = 0;
|
||||
|
||||
if (!hardware_locked)
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
@ -1959,10 +1973,11 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
|
||||
fw->host_status = htonl(RD_REG_DWORD(®->host_status));
|
||||
|
||||
/* Pause RISC. */
|
||||
rval = qla24xx_pause_risc(reg);
|
||||
if (rval != QLA_SUCCESS)
|
||||
goto qla83xx_fw_dump_failed_0;
|
||||
/*
|
||||
* Pause RISC. No need to track timeout, as resetting the chip
|
||||
* is the right approach incase of pause timeout
|
||||
*/
|
||||
qla24xx_pause_risc(reg, ha);
|
||||
|
||||
WRT_REG_DWORD(®->iobase_addr, 0x6000);
|
||||
dmp_reg = ®->iobase_window;
|
||||
@ -2385,9 +2400,11 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
nxt += sizeof(fw->code_ram);
|
||||
nxt += (ha->fw_memory_size - 0x100000 + 1);
|
||||
goto copy_queue;
|
||||
} else
|
||||
} else {
|
||||
set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
|
||||
ql_log(ql_log_warn, vha, 0xd010,
|
||||
"bigger hammer success?\n");
|
||||
}
|
||||
}
|
||||
|
||||
rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
@ -353,5 +353,6 @@ extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
|
||||
uint32_t, void **);
|
||||
extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *,
|
||||
uint32_t, void **);
|
||||
extern int qla24xx_pause_risc(struct device_reg_24xx __iomem *);
|
||||
extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *,
|
||||
struct qla_hw_data *);
|
||||
extern int qla24xx_soft_reset(struct qla_hw_data *);
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
@ -965,6 +965,13 @@ struct mbx_cmd_32 {
|
||||
*/
|
||||
#define MBC_WRITE_MPI_REGISTER 0x01 /* Write MPI Register. */
|
||||
|
||||
/*
|
||||
* ISP8044 mailbox commands
|
||||
*/
|
||||
#define MBC_SET_GET_ETH_SERDES_REG 0x150
|
||||
#define HCS_WRITE_SERDES 0x3
|
||||
#define HCS_READ_SERDES 0x4
|
||||
|
||||
/* Firmware return data sizes */
|
||||
#define FCAL_MAP_SIZE 128
|
||||
|
||||
@ -1622,10 +1629,20 @@ typedef struct {
|
||||
#define PO_MODE_DIF_PASS 2
|
||||
#define PO_MODE_DIF_REPLACE 3
|
||||
#define PO_MODE_DIF_TCP_CKSUM 6
|
||||
#define PO_ENABLE_DIF_BUNDLING BIT_8
|
||||
#define PO_ENABLE_INCR_GUARD_SEED BIT_3
|
||||
#define PO_DISABLE_INCR_REF_TAG BIT_5
|
||||
#define PO_DISABLE_GUARD_CHECK BIT_4
|
||||
#define PO_DISABLE_INCR_REF_TAG BIT_5
|
||||
#define PO_DIS_HEADER_MODE BIT_7
|
||||
#define PO_ENABLE_DIF_BUNDLING BIT_8
|
||||
#define PO_DIS_FRAME_MODE BIT_9
|
||||
#define PO_DIS_VALD_APP_ESC BIT_10 /* Dis validation for escape tag/ffffh */
|
||||
#define PO_DIS_VALD_APP_REF_ESC BIT_11
|
||||
|
||||
#define PO_DIS_APP_TAG_REPL BIT_12 /* disable REG Tag replacement */
|
||||
#define PO_DIS_REF_TAG_REPL BIT_13
|
||||
#define PO_DIS_APP_TAG_VALD BIT_14 /* disable REF Tag validation */
|
||||
#define PO_DIS_REF_TAG_VALD BIT_15
|
||||
|
||||
/*
|
||||
* ISP queue - 64-Bit addressing, continuation crc entry structure definition.
|
||||
*/
|
||||
@ -1748,6 +1765,8 @@ typedef struct {
|
||||
#define CS_PORT_CONFIG_CHG 0x2A /* Port Configuration Changed */
|
||||
#define CS_PORT_BUSY 0x2B /* Port Busy */
|
||||
#define CS_COMPLETE_CHKCOND 0x30 /* Error? */
|
||||
#define CS_IOCB_ERROR 0x31 /* Generic error for IOCB request
|
||||
failure */
|
||||
#define CS_BAD_PAYLOAD 0x80 /* Driver defined */
|
||||
#define CS_UNKNOWN 0x81 /* Driver defined */
|
||||
#define CS_RETRY 0x82 /* Driver defined */
|
||||
@ -2676,6 +2695,7 @@ struct rsp_que {
|
||||
uint32_t __iomem *rsp_q_out;
|
||||
uint16_t ring_index;
|
||||
uint16_t out_ptr;
|
||||
uint16_t *in_ptr; /* queue shadow in index */
|
||||
uint16_t length;
|
||||
uint16_t options;
|
||||
uint16_t rid;
|
||||
@ -2702,6 +2722,7 @@ struct req_que {
|
||||
uint32_t __iomem *req_q_out;
|
||||
uint16_t ring_index;
|
||||
uint16_t in_ptr;
|
||||
uint16_t *out_ptr; /* queue shadow out index */
|
||||
uint16_t cnt;
|
||||
uint16_t length;
|
||||
uint16_t options;
|
||||
@ -2907,6 +2928,8 @@ struct qla_hw_data {
|
||||
#define PCI_DEVICE_ID_QLOGIC_ISP8031 0x8031
|
||||
#define PCI_DEVICE_ID_QLOGIC_ISP2031 0x2031
|
||||
#define PCI_DEVICE_ID_QLOGIC_ISP2071 0x2071
|
||||
#define PCI_DEVICE_ID_QLOGIC_ISP2271 0x2271
|
||||
|
||||
uint32_t device_type;
|
||||
#define DT_ISP2100 BIT_0
|
||||
#define DT_ISP2200 BIT_1
|
||||
@ -2928,7 +2951,8 @@ struct qla_hw_data {
|
||||
#define DT_ISPFX00 BIT_17
|
||||
#define DT_ISP8044 BIT_18
|
||||
#define DT_ISP2071 BIT_19
|
||||
#define DT_ISP_LAST (DT_ISP2071 << 1)
|
||||
#define DT_ISP2271 BIT_20
|
||||
#define DT_ISP_LAST (DT_ISP2271 << 1)
|
||||
|
||||
#define DT_T10_PI BIT_25
|
||||
#define DT_IIDMA BIT_26
|
||||
@ -2959,6 +2983,7 @@ struct qla_hw_data {
|
||||
#define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031)
|
||||
#define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00)
|
||||
#define IS_QLA2071(ha) (DT_MASK(ha) & DT_ISP2071)
|
||||
#define IS_QLA2271(ha) (DT_MASK(ha) & DT_ISP2271)
|
||||
|
||||
#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
|
||||
IS_QLA6312(ha) || IS_QLA6322(ha))
|
||||
@ -2967,7 +2992,7 @@ struct qla_hw_data {
|
||||
#define IS_QLA25XX(ha) (IS_QLA2532(ha))
|
||||
#define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha))
|
||||
#define IS_QLA84XX(ha) (IS_QLA8432(ha))
|
||||
#define IS_QLA27XX(ha) (IS_QLA2071(ha))
|
||||
#define IS_QLA27XX(ha) (IS_QLA2071(ha) || IS_QLA2271(ha))
|
||||
#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
|
||||
IS_QLA84XX(ha))
|
||||
#define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
|
||||
@ -3006,6 +3031,7 @@ struct qla_hw_data {
|
||||
(((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
|
||||
#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha))
|
||||
#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length)
|
||||
#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha))
|
||||
|
||||
/* HBA serial number */
|
||||
uint8_t serial0;
|
||||
@ -3136,7 +3162,15 @@ struct qla_hw_data {
|
||||
struct qla2xxx_fw_dump *fw_dump;
|
||||
uint32_t fw_dump_len;
|
||||
int fw_dumped;
|
||||
unsigned long fw_dump_cap_flags;
|
||||
#define RISC_PAUSE_CMPL 0
|
||||
#define DMA_SHUTDOWN_CMPL 1
|
||||
#define ISP_RESET_CMPL 2
|
||||
#define RISC_RDY_AFT_RESET 3
|
||||
#define RISC_SRAM_DUMP_CMPL 4
|
||||
#define RISC_EXT_MEM_DUMP_CMPL 5
|
||||
int fw_dump_reading;
|
||||
int prev_minidump_failed;
|
||||
dma_addr_t eft_dma;
|
||||
void *eft;
|
||||
/* Current size of mctp dump is 0x086064 bytes */
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
@ -371,7 +371,10 @@ struct init_cb_24xx {
|
||||
* BIT 14 = Data Rate bit 1
|
||||
* BIT 15 = Data Rate bit 2
|
||||
* BIT 16 = Enable 75 ohm Termination Select
|
||||
* BIT 17-31 = Reserved
|
||||
* BIT 17-28 = Reserved
|
||||
* BIT 29 = Enable response queue 0 in index shadowing
|
||||
* BIT 30 = Enable request queue 0 out index shadowing
|
||||
* BIT 31 = Reserved
|
||||
*/
|
||||
uint32_t firmware_options_3;
|
||||
uint16_t qos;
|
||||
@ -1134,13 +1137,6 @@ struct device_reg_24xx {
|
||||
#define MIN_MULTI_ID_FABRIC 64 /* Must be power-of-2. */
|
||||
#define MAX_MULTI_ID_FABRIC 256 /* ... */
|
||||
|
||||
#define for_each_mapped_vp_idx(_ha, _idx) \
|
||||
for (_idx = find_next_bit((_ha)->vp_idx_map, \
|
||||
(_ha)->max_npiv_vports + 1, 1); \
|
||||
_idx <= (_ha)->max_npiv_vports; \
|
||||
_idx = find_next_bit((_ha)->vp_idx_map, \
|
||||
(_ha)->max_npiv_vports + 1, _idx + 1)) \
|
||||
|
||||
struct mid_conf_entry_24xx {
|
||||
uint16_t reserved_1;
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
@ -220,6 +220,13 @@ extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
|
||||
|
||||
extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
|
||||
extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
|
||||
extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
|
||||
uint32_t *, uint16_t, struct qla_tgt_cmd *);
|
||||
extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
|
||||
uint32_t *, uint16_t, struct qla_tgt_cmd *);
|
||||
extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
|
||||
uint32_t *, uint16_t, struct qla_tgt_cmd *);
|
||||
|
||||
|
||||
/*
|
||||
* Global Function Prototypes in qla_mbx.c source file.
|
||||
@ -346,6 +353,11 @@ qla2x00_write_serdes_word(scsi_qla_host_t *, uint16_t, uint16_t);
|
||||
extern int
|
||||
qla2x00_read_serdes_word(scsi_qla_host_t *, uint16_t, uint16_t *);
|
||||
|
||||
extern int
|
||||
qla8044_write_serdes_word(scsi_qla_host_t *, uint32_t, uint32_t);
|
||||
extern int
|
||||
qla8044_read_serdes_word(scsi_qla_host_t *, uint32_t, uint32_t *);
|
||||
|
||||
extern int
|
||||
qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t);
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
@ -1476,6 +1476,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
|
||||
}
|
||||
|
||||
ha->fw_dumped = 0;
|
||||
ha->fw_dump_cap_flags = 0;
|
||||
dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
|
||||
req_q_size = rsp_q_size = 0;
|
||||
|
||||
@ -2061,6 +2062,10 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
|
||||
icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
|
||||
icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
|
||||
|
||||
if (IS_SHADOW_REG_CAPABLE(ha))
|
||||
icb->firmware_options_2 |=
|
||||
__constant_cpu_to_le32(BIT_30|BIT_29);
|
||||
|
||||
if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
|
||||
icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
|
||||
icb->rid = __constant_cpu_to_le16(rid);
|
||||
@ -2138,6 +2143,8 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
|
||||
req = ha->req_q_map[que];
|
||||
if (!req)
|
||||
continue;
|
||||
req->out_ptr = (void *)(req->ring + req->length);
|
||||
*req->out_ptr = 0;
|
||||
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
|
||||
req->outstanding_cmds[cnt] = NULL;
|
||||
|
||||
@ -2153,6 +2160,8 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
|
||||
rsp = ha->rsp_q_map[que];
|
||||
if (!rsp)
|
||||
continue;
|
||||
rsp->in_ptr = (void *)(rsp->ring + rsp->length);
|
||||
*rsp->in_ptr = 0;
|
||||
/* Initialize response queue entries */
|
||||
if (IS_QLAFX00(ha))
|
||||
qlafx00_init_response_q_entries(rsp);
|
||||
@ -3406,7 +3415,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
|
||||
fcport->d_id.b.domain,
|
||||
fcport->d_id.b.area,
|
||||
fcport->d_id.b.al_pa);
|
||||
fcport->loop_id = FC_NO_LOOP_ID;
|
||||
qla2x00_clear_loop_id(fcport);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -4727,7 +4736,6 @@ static int
|
||||
qla2x00_restart_isp(scsi_qla_host_t *vha)
|
||||
{
|
||||
int status = 0;
|
||||
uint32_t wait_time;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct req_que *req = ha->req_q_map[0];
|
||||
struct rsp_que *rsp = ha->rsp_q_map[0];
|
||||
@ -4744,14 +4752,12 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
|
||||
if (!status && !(status = qla2x00_init_rings(vha))) {
|
||||
clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
|
||||
ha->flags.chip_reset_done = 1;
|
||||
|
||||
/* Initialize the queues in use */
|
||||
qla25xx_init_queues(ha);
|
||||
|
||||
status = qla2x00_fw_ready(vha);
|
||||
if (!status) {
|
||||
ql_dbg(ql_dbg_taskm, vha, 0x8031,
|
||||
"Start configure loop status = %d.\n", status);
|
||||
|
||||
/* Issue a marker after FW becomes ready. */
|
||||
qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
|
||||
|
||||
@ -4766,24 +4772,12 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
|
||||
qlt_24xx_process_atio_queue(vha);
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
|
||||
/* Wait at most MAX_TARGET RSCNs for a stable link. */
|
||||
wait_time = 256;
|
||||
do {
|
||||
clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
|
||||
qla2x00_configure_loop(vha);
|
||||
wait_time--;
|
||||
} while (!atomic_read(&vha->loop_down_timer) &&
|
||||
!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
|
||||
&& wait_time && (test_bit(LOOP_RESYNC_NEEDED,
|
||||
&vha->dpc_flags)));
|
||||
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
|
||||
}
|
||||
|
||||
/* if no cable then assume it's good */
|
||||
if ((vha->device_flags & DFLG_NO_CABLE))
|
||||
status = 0;
|
||||
|
||||
ql_dbg(ql_dbg_taskm, vha, 0x8032,
|
||||
"Configure loop done, status = 0x%x.\n", status);
|
||||
}
|
||||
return (status);
|
||||
}
|
||||
@ -6130,7 +6124,6 @@ int
|
||||
qla82xx_restart_isp(scsi_qla_host_t *vha)
|
||||
{
|
||||
int status, rval;
|
||||
uint32_t wait_time;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct req_que *req = ha->req_q_map[0];
|
||||
struct rsp_que *rsp = ha->rsp_q_map[0];
|
||||
@ -6144,31 +6137,15 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
|
||||
|
||||
status = qla2x00_fw_ready(vha);
|
||||
if (!status) {
|
||||
ql_log(ql_log_info, vha, 0x803c,
|
||||
"Start configure loop, status =%d.\n", status);
|
||||
|
||||
/* Issue a marker after FW becomes ready. */
|
||||
qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
|
||||
|
||||
vha->flags.online = 1;
|
||||
/* Wait at most MAX_TARGET RSCNs for a stable link. */
|
||||
wait_time = 256;
|
||||
do {
|
||||
clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
|
||||
qla2x00_configure_loop(vha);
|
||||
wait_time--;
|
||||
} while (!atomic_read(&vha->loop_down_timer) &&
|
||||
!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) &&
|
||||
wait_time &&
|
||||
(test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)));
|
||||
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
|
||||
}
|
||||
|
||||
/* if no cable then assume it's good */
|
||||
if ((vha->device_flags & DFLG_NO_CABLE))
|
||||
status = 0;
|
||||
|
||||
ql_log(ql_log_info, vha, 0x8000,
|
||||
"Configure loop done, status = 0x%x.\n", status);
|
||||
}
|
||||
|
||||
if (!status) {
|
||||
@ -6182,8 +6159,6 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
|
||||
vha->marker_needed = 1;
|
||||
}
|
||||
|
||||
vha->flags.online = 1;
|
||||
|
||||
ha->isp_ops->enable_intrs(ha);
|
||||
|
||||
ha->isp_abort_cnt = 0;
|
||||
|
@ -1,10 +1,11 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
|
||||
#include "qla_target.h"
|
||||
/**
|
||||
* qla24xx_calc_iocbs() - Determine number of Command Type 3 and
|
||||
* Continuation Type 1 IOCBs to allocate.
|
||||
@ -128,12 +129,20 @@ qla2x00_clear_loop_id(fc_port_t *fcport) {
|
||||
}
|
||||
|
||||
static inline void
|
||||
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
|
||||
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp,
|
||||
struct qla_tgt_cmd *tc)
|
||||
{
|
||||
struct dsd_dma *dsd_ptr, *tdsd_ptr;
|
||||
struct crc_context *ctx;
|
||||
|
||||
ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
|
||||
if (sp)
|
||||
ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
|
||||
else if (tc)
|
||||
ctx = (struct crc_context *)tc->ctx;
|
||||
else {
|
||||
BUG();
|
||||
return;
|
||||
}
|
||||
|
||||
/* clean up allocated prev pool */
|
||||
list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
@ -936,9 +936,9 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int
|
||||
int
|
||||
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
|
||||
uint32_t *dsd, uint16_t tot_dsds)
|
||||
uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
|
||||
{
|
||||
void *next_dsd;
|
||||
uint8_t avail_dsds = 0;
|
||||
@ -948,21 +948,35 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
|
||||
uint32_t *cur_dsd = dsd;
|
||||
uint16_t used_dsds = tot_dsds;
|
||||
|
||||
uint32_t prot_int;
|
||||
uint32_t prot_int; /* protection interval */
|
||||
uint32_t partial;
|
||||
struct qla2_sgx sgx;
|
||||
dma_addr_t sle_dma;
|
||||
uint32_t sle_dma_len, tot_prot_dma_len = 0;
|
||||
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
|
||||
|
||||
prot_int = cmd->device->sector_size;
|
||||
struct scsi_cmnd *cmd;
|
||||
struct scsi_qla_host *vha;
|
||||
|
||||
memset(&sgx, 0, sizeof(struct qla2_sgx));
|
||||
sgx.tot_bytes = scsi_bufflen(cmd);
|
||||
sgx.cur_sg = scsi_sglist(cmd);
|
||||
sgx.sp = sp;
|
||||
if (sp) {
|
||||
vha = sp->fcport->vha;
|
||||
cmd = GET_CMD_SP(sp);
|
||||
prot_int = cmd->device->sector_size;
|
||||
|
||||
sg_prot = scsi_prot_sglist(cmd);
|
||||
sgx.tot_bytes = scsi_bufflen(cmd);
|
||||
sgx.cur_sg = scsi_sglist(cmd);
|
||||
sgx.sp = sp;
|
||||
|
||||
sg_prot = scsi_prot_sglist(cmd);
|
||||
} else if (tc) {
|
||||
vha = tc->vha;
|
||||
prot_int = tc->blk_sz;
|
||||
sgx.tot_bytes = tc->bufflen;
|
||||
sgx.cur_sg = tc->sg;
|
||||
sg_prot = tc->prot_sg;
|
||||
} else {
|
||||
BUG();
|
||||
return 1;
|
||||
}
|
||||
|
||||
while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
|
||||
|
||||
@ -995,10 +1009,18 @@ alloc_and_fill:
|
||||
return 1;
|
||||
}
|
||||
|
||||
list_add_tail(&dsd_ptr->list,
|
||||
&((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
|
||||
if (sp) {
|
||||
list_add_tail(&dsd_ptr->list,
|
||||
&((struct crc_context *)
|
||||
sp->u.scmd.ctx)->dsd_list);
|
||||
|
||||
sp->flags |= SRB_CRC_CTX_DSD_VALID;
|
||||
} else {
|
||||
list_add_tail(&dsd_ptr->list,
|
||||
&(tc->ctx->dsd_list));
|
||||
tc->ctx_dsd_alloced = 1;
|
||||
}
|
||||
|
||||
sp->flags |= SRB_CRC_CTX_DSD_VALID;
|
||||
|
||||
/* add new list to cmd iocb or last list */
|
||||
*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
|
||||
@ -1033,21 +1055,35 @@ alloc_and_fill:
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
int
|
||||
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
|
||||
uint16_t tot_dsds)
|
||||
uint16_t tot_dsds, struct qla_tgt_cmd *tc)
|
||||
{
|
||||
void *next_dsd;
|
||||
uint8_t avail_dsds = 0;
|
||||
uint32_t dsd_list_len;
|
||||
struct dsd_dma *dsd_ptr;
|
||||
struct scatterlist *sg;
|
||||
struct scatterlist *sg, *sgl;
|
||||
uint32_t *cur_dsd = dsd;
|
||||
int i;
|
||||
uint16_t used_dsds = tot_dsds;
|
||||
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
|
||||
struct scsi_cmnd *cmd;
|
||||
struct scsi_qla_host *vha;
|
||||
|
||||
scsi_for_each_sg(cmd, sg, tot_dsds, i) {
|
||||
if (sp) {
|
||||
cmd = GET_CMD_SP(sp);
|
||||
sgl = scsi_sglist(cmd);
|
||||
vha = sp->fcport->vha;
|
||||
} else if (tc) {
|
||||
sgl = tc->sg;
|
||||
vha = tc->vha;
|
||||
} else {
|
||||
BUG();
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
for_each_sg(sgl, sg, tot_dsds, i) {
|
||||
dma_addr_t sle_dma;
|
||||
|
||||
/* Allocate additional continuation packets? */
|
||||
@ -1076,10 +1112,17 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
|
||||
return 1;
|
||||
}
|
||||
|
||||
list_add_tail(&dsd_ptr->list,
|
||||
&((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
|
||||
if (sp) {
|
||||
list_add_tail(&dsd_ptr->list,
|
||||
&((struct crc_context *)
|
||||
sp->u.scmd.ctx)->dsd_list);
|
||||
|
||||
sp->flags |= SRB_CRC_CTX_DSD_VALID;
|
||||
sp->flags |= SRB_CRC_CTX_DSD_VALID;
|
||||
} else {
|
||||
list_add_tail(&dsd_ptr->list,
|
||||
&(tc->ctx->dsd_list));
|
||||
tc->ctx_dsd_alloced = 1;
|
||||
}
|
||||
|
||||
/* add new list to cmd iocb or last list */
|
||||
*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
|
||||
@ -1102,23 +1145,37 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
int
|
||||
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
|
||||
uint32_t *dsd,
|
||||
uint16_t tot_dsds)
|
||||
uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
|
||||
{
|
||||
void *next_dsd;
|
||||
uint8_t avail_dsds = 0;
|
||||
uint32_t dsd_list_len;
|
||||
struct dsd_dma *dsd_ptr;
|
||||
struct scatterlist *sg;
|
||||
struct scatterlist *sg, *sgl;
|
||||
int i;
|
||||
struct scsi_cmnd *cmd;
|
||||
uint32_t *cur_dsd = dsd;
|
||||
uint16_t used_dsds = tot_dsds;
|
||||
uint16_t used_dsds = tot_dsds;
|
||||
struct scsi_qla_host *vha;
|
||||
|
||||
cmd = GET_CMD_SP(sp);
|
||||
scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
|
||||
if (sp) {
|
||||
cmd = GET_CMD_SP(sp);
|
||||
sgl = scsi_prot_sglist(cmd);
|
||||
vha = sp->fcport->vha;
|
||||
} else if (tc) {
|
||||
vha = tc->vha;
|
||||
sgl = tc->prot_sg;
|
||||
} else {
|
||||
BUG();
|
||||
return 1;
|
||||
}
|
||||
|
||||
ql_dbg(ql_dbg_tgt, vha, 0xe021,
|
||||
"%s: enter\n", __func__);
|
||||
|
||||
for_each_sg(sgl, sg, tot_dsds, i) {
|
||||
dma_addr_t sle_dma;
|
||||
|
||||
/* Allocate additional continuation packets? */
|
||||
@ -1147,10 +1204,17 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
|
||||
return 1;
|
||||
}
|
||||
|
||||
list_add_tail(&dsd_ptr->list,
|
||||
&((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
|
||||
if (sp) {
|
||||
list_add_tail(&dsd_ptr->list,
|
||||
&((struct crc_context *)
|
||||
sp->u.scmd.ctx)->dsd_list);
|
||||
|
||||
sp->flags |= SRB_CRC_CTX_DSD_VALID;
|
||||
sp->flags |= SRB_CRC_CTX_DSD_VALID;
|
||||
} else {
|
||||
list_add_tail(&dsd_ptr->list,
|
||||
&(tc->ctx->dsd_list));
|
||||
tc->ctx_dsd_alloced = 1;
|
||||
}
|
||||
|
||||
/* add new list to cmd iocb or last list */
|
||||
*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
|
||||
@ -1386,10 +1450,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
|
||||
|
||||
if (!bundling && tot_prot_dsds) {
|
||||
if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
|
||||
cur_dsd, tot_dsds))
|
||||
cur_dsd, tot_dsds, NULL))
|
||||
goto crc_queuing_error;
|
||||
} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
|
||||
(tot_dsds - tot_prot_dsds)))
|
||||
(tot_dsds - tot_prot_dsds), NULL))
|
||||
goto crc_queuing_error;
|
||||
|
||||
if (bundling && tot_prot_dsds) {
|
||||
@ -1398,7 +1462,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
|
||||
__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
|
||||
cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
|
||||
if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
|
||||
tot_prot_dsds))
|
||||
tot_prot_dsds, NULL))
|
||||
goto crc_queuing_error;
|
||||
}
|
||||
return QLA_SUCCESS;
|
||||
@ -1478,8 +1542,8 @@ qla24xx_start_scsi(srb_t *sp)
|
||||
tot_dsds = nseg;
|
||||
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
|
||||
if (req->cnt < (req_cnt + 2)) {
|
||||
cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
|
||||
|
||||
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
|
||||
RD_REG_DWORD_RELAXED(req->req_q_out);
|
||||
if (req->ring_index < cnt)
|
||||
req->cnt = cnt - req->ring_index;
|
||||
else
|
||||
@ -1697,8 +1761,8 @@ qla24xx_dif_start_scsi(srb_t *sp)
|
||||
tot_prot_dsds = nseg;
|
||||
tot_dsds += nseg;
|
||||
if (req->cnt < (req_cnt + 2)) {
|
||||
cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
|
||||
|
||||
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
|
||||
RD_REG_DWORD_RELAXED(req->req_q_out);
|
||||
if (req->ring_index < cnt)
|
||||
req->cnt = cnt - req->ring_index;
|
||||
else
|
||||
@ -2825,8 +2889,8 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
|
||||
|
||||
/* Check for room on request queue. */
|
||||
if (req->cnt < req_cnt + 2) {
|
||||
cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
|
||||
|
||||
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
|
||||
RD_REG_DWORD_RELAXED(req->req_q_out);
|
||||
if (req->ring_index < cnt)
|
||||
req->cnt = cnt - req->ring_index;
|
||||
else
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
@ -2009,11 +2009,13 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
|
||||
ql_dbg(ql_dbg_io, vha, 0x3017,
|
||||
"Invalid status handle (0x%x).\n", sts->handle);
|
||||
|
||||
if (IS_P3P_TYPE(ha))
|
||||
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
|
||||
else
|
||||
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
|
||||
qla2xxx_wake_dpc(vha);
|
||||
if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
|
||||
if (IS_P3P_TYPE(ha))
|
||||
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
|
||||
else
|
||||
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
|
||||
qla2xxx_wake_dpc(vha);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
@ -2472,12 +2474,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
|
||||
if (pkt->entry_status != 0) {
|
||||
qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
|
||||
|
||||
(void)qlt_24xx_process_response_error(vha, pkt);
|
||||
if (qlt_24xx_process_response_error(vha, pkt))
|
||||
goto process_err;
|
||||
|
||||
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
|
||||
wmb();
|
||||
continue;
|
||||
}
|
||||
process_err:
|
||||
|
||||
switch (pkt->entry_type) {
|
||||
case STATUS_TYPE:
|
||||
@ -2494,10 +2498,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
|
||||
qla24xx_logio_entry(vha, rsp->req,
|
||||
(struct logio_entry_24xx *)pkt);
|
||||
break;
|
||||
case CT_IOCB_TYPE:
|
||||
case CT_IOCB_TYPE:
|
||||
qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
|
||||
break;
|
||||
case ELS_IOCB_TYPE:
|
||||
case ELS_IOCB_TYPE:
|
||||
qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
|
||||
break;
|
||||
case ABTS_RECV_24XX:
|
||||
@ -2506,6 +2510,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
|
||||
case ABTS_RESP_24XX:
|
||||
case CTIO_TYPE7:
|
||||
case NOTIFY_ACK_TYPE:
|
||||
case CTIO_CRC2:
|
||||
qlt_response_pkt_all_vps(vha, (response_t *)pkt);
|
||||
break;
|
||||
case MARKER_TYPE:
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
@ -1319,7 +1319,7 @@ qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
|
||||
|
||||
left = 0;
|
||||
|
||||
list = kzalloc(dma_size, GFP_KERNEL);
|
||||
list = kmemdup(pmap, dma_size, GFP_KERNEL);
|
||||
if (!list) {
|
||||
ql_log(ql_log_warn, vha, 0x1140,
|
||||
"%s(%ld): failed to allocate node names list "
|
||||
@ -1328,7 +1328,6 @@ qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
memcpy(list, pmap, dma_size);
|
||||
restart:
|
||||
dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
|
||||
}
|
||||
@ -2644,7 +2643,10 @@ qla24xx_abort_command(srb_t *sp)
|
||||
ql_dbg(ql_dbg_mbx, vha, 0x1090,
|
||||
"Failed to complete IOCB -- completion status (%x).\n",
|
||||
le16_to_cpu(abt->nport_handle));
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
if (abt->nport_handle == CS_IOCB_ERROR)
|
||||
rval = QLA_FUNCTION_PARAMETER_ERROR;
|
||||
else
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
} else {
|
||||
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
|
||||
"Done %s.\n", __func__);
|
||||
@ -2879,6 +2881,78 @@ qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
|
||||
return rval;
|
||||
}
|
||||
|
||||
int
|
||||
qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
|
||||
{
|
||||
int rval;
|
||||
mbx_cmd_t mc;
|
||||
mbx_cmd_t *mcp = &mc;
|
||||
|
||||
if (!IS_QLA8044(vha->hw))
|
||||
return QLA_FUNCTION_FAILED;
|
||||
|
||||
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1186,
|
||||
"Entered %s.\n", __func__);
|
||||
|
||||
mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
|
||||
mcp->mb[1] = HCS_WRITE_SERDES;
|
||||
mcp->mb[3] = LSW(addr);
|
||||
mcp->mb[4] = MSW(addr);
|
||||
mcp->mb[5] = LSW(data);
|
||||
mcp->mb[6] = MSW(data);
|
||||
mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
|
||||
mcp->in_mb = MBX_0;
|
||||
mcp->tov = MBX_TOV_SECONDS;
|
||||
mcp->flags = 0;
|
||||
rval = qla2x00_mailbox_command(vha, mcp);
|
||||
|
||||
if (rval != QLA_SUCCESS) {
|
||||
ql_dbg(ql_dbg_mbx, vha, 0x1187,
|
||||
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
|
||||
} else {
|
||||
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
|
||||
"Done %s.\n", __func__);
|
||||
}
|
||||
|
||||
return rval;
|
||||
}
|
||||
|
||||
int
|
||||
qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
|
||||
{
|
||||
int rval;
|
||||
mbx_cmd_t mc;
|
||||
mbx_cmd_t *mcp = &mc;
|
||||
|
||||
if (!IS_QLA8044(vha->hw))
|
||||
return QLA_FUNCTION_FAILED;
|
||||
|
||||
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
|
||||
"Entered %s.\n", __func__);
|
||||
|
||||
mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
|
||||
mcp->mb[1] = HCS_READ_SERDES;
|
||||
mcp->mb[3] = LSW(addr);
|
||||
mcp->mb[4] = MSW(addr);
|
||||
mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
|
||||
mcp->in_mb = MBX_2|MBX_1|MBX_0;
|
||||
mcp->tov = MBX_TOV_SECONDS;
|
||||
mcp->flags = 0;
|
||||
rval = qla2x00_mailbox_command(vha, mcp);
|
||||
|
||||
*data = mcp->mb[2] << 16 | mcp->mb[1];
|
||||
|
||||
if (rval != QLA_SUCCESS) {
|
||||
ql_dbg(ql_dbg_mbx, vha, 0x118a,
|
||||
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
|
||||
} else {
|
||||
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
|
||||
"Done %s.\n", __func__);
|
||||
}
|
||||
|
||||
return rval;
|
||||
}
|
||||
|
||||
/**
|
||||
* qla2x00_set_serdes_params() -
|
||||
* @ha: HA context
|
||||
@ -3660,6 +3734,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
|
||||
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
|
||||
"Entered %s.\n", __func__);
|
||||
|
||||
if (IS_SHADOW_REG_CAPABLE(ha))
|
||||
req->options |= BIT_13;
|
||||
|
||||
mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
|
||||
mcp->mb[1] = req->options;
|
||||
mcp->mb[2] = MSW(LSD(req->dma));
|
||||
@ -3679,7 +3756,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
|
||||
/* que in ptr index */
|
||||
mcp->mb[8] = 0;
|
||||
/* que out ptr index */
|
||||
mcp->mb[9] = 0;
|
||||
mcp->mb[9] = *req->out_ptr = 0;
|
||||
mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
|
||||
MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
|
||||
mcp->in_mb = MBX_0;
|
||||
@ -3688,7 +3765,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
|
||||
|
||||
if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
|
||||
mcp->in_mb |= MBX_1;
|
||||
if (IS_QLA83XX(ha) || !IS_QLA27XX(ha)) {
|
||||
if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
|
||||
mcp->out_mb |= MBX_15;
|
||||
/* debug q create issue in SR-IOV */
|
||||
mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
|
||||
@ -3697,7 +3774,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
if (!(req->options & BIT_0)) {
|
||||
WRT_REG_DWORD(req->req_q_in, 0);
|
||||
if (!IS_QLA83XX(ha) || !IS_QLA27XX(ha))
|
||||
if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
|
||||
WRT_REG_DWORD(req->req_q_out, 0);
|
||||
}
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
@ -3726,6 +3803,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
|
||||
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
|
||||
"Entered %s.\n", __func__);
|
||||
|
||||
if (IS_SHADOW_REG_CAPABLE(ha))
|
||||
rsp->options |= BIT_13;
|
||||
|
||||
mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
|
||||
mcp->mb[1] = rsp->options;
|
||||
mcp->mb[2] = MSW(LSD(rsp->dma));
|
||||
@ -3740,7 +3820,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
|
||||
|
||||
mcp->mb[4] = rsp->id;
|
||||
/* que in ptr index */
|
||||
mcp->mb[8] = 0;
|
||||
mcp->mb[8] = *rsp->in_ptr = 0;
|
||||
/* que out ptr index */
|
||||
mcp->mb[9] = 0;
|
||||
mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
@ -527,21 +527,63 @@ qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
int i, core;
|
||||
uint32_t cnt;
|
||||
uint32_t reg_val;
|
||||
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
|
||||
QLAFX00_SET_HBA_SOC_REG(ha, 0x80004, 0);
|
||||
QLAFX00_SET_HBA_SOC_REG(ha, 0x82004, 0);
|
||||
|
||||
/* stop the XOR DMA engines */
|
||||
QLAFX00_SET_HBA_SOC_REG(ha, 0x60920, 0x02);
|
||||
QLAFX00_SET_HBA_SOC_REG(ha, 0x60924, 0x02);
|
||||
QLAFX00_SET_HBA_SOC_REG(ha, 0xf0920, 0x02);
|
||||
QLAFX00_SET_HBA_SOC_REG(ha, 0xf0924, 0x02);
|
||||
|
||||
/* stop the IDMA engines */
|
||||
reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60840);
|
||||
reg_val &= ~(1<<12);
|
||||
QLAFX00_SET_HBA_SOC_REG(ha, 0x60840, reg_val);
|
||||
|
||||
reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60844);
|
||||
reg_val &= ~(1<<12);
|
||||
QLAFX00_SET_HBA_SOC_REG(ha, 0x60844, reg_val);
|
||||
|
||||
reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60848);
|
||||
reg_val &= ~(1<<12);
|
||||
QLAFX00_SET_HBA_SOC_REG(ha, 0x60848, reg_val);
|
||||
|
||||
reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x6084C);
|
||||
reg_val &= ~(1<<12);
|
||||
QLAFX00_SET_HBA_SOC_REG(ha, 0x6084C, reg_val);
|
||||
|
||||
for (i = 0; i < 100000; i++) {
|
||||
if ((QLAFX00_GET_HBA_SOC_REG(ha, 0xd0000) & 0x10000000) == 0 &&
|
||||
(QLAFX00_GET_HBA_SOC_REG(ha, 0x10600) & 0x1) == 0)
|
||||
break;
|
||||
udelay(100);
|
||||
}
|
||||
|
||||
/* Set all 4 cores in reset */
|
||||
for (i = 0; i < 4; i++) {
|
||||
QLAFX00_SET_HBA_SOC_REG(ha,
|
||||
(SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01));
|
||||
}
|
||||
|
||||
/* Set all 4 core Clock gating control */
|
||||
for (i = 0; i < 4; i++) {
|
||||
QLAFX00_SET_HBA_SOC_REG(ha,
|
||||
(SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101));
|
||||
}
|
||||
|
||||
/* Reset all units in Fabric */
|
||||
QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x11F0101));
|
||||
QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x011f0101));
|
||||
|
||||
/* */
|
||||
QLAFX00_SET_HBA_SOC_REG(ha, 0x10610, 1);
|
||||
QLAFX00_SET_HBA_SOC_REG(ha, 0x10600, 0);
|
||||
|
||||
/* Set all 4 core Memory Power Down Registers */
|
||||
for (i = 0; i < 5; i++) {
|
||||
QLAFX00_SET_HBA_SOC_REG(ha,
|
||||
(SOC_PWR_MANAGEMENT_PWR_DOWN_REG + 4*i), (0x0));
|
||||
}
|
||||
|
||||
/* Reset all interrupt control registers */
|
||||
for (i = 0; i < 115; i++) {
|
||||
@ -564,20 +606,19 @@ qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
|
||||
QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));
|
||||
QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3));
|
||||
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
|
||||
/* Kick in Fabric units */
|
||||
QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));
|
||||
|
||||
/* Kick in Core0 to start boot process */
|
||||
QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00));
|
||||
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
|
||||
/* Wait 10secs for soft-reset to complete. */
|
||||
for (cnt = 10; cnt; cnt--) {
|
||||
msleep(1000);
|
||||
barrier();
|
||||
}
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -597,7 +638,6 @@ qlafx00_soft_reset(scsi_qla_host_t *vha)
|
||||
|
||||
ha->isp_ops->disable_intrs(ha);
|
||||
qlafx00_soc_cpu_reset(vha);
|
||||
ha->isp_ops->enable_intrs(ha);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -2675,7 +2715,7 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
|
||||
uint16_t lreq_q_out = 0;
|
||||
|
||||
lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in);
|
||||
lreq_q_out = RD_REG_DWORD(rsp->rsp_q_out);
|
||||
lreq_q_out = rsp->ring_index;
|
||||
|
||||
while (lreq_q_in != lreq_q_out) {
|
||||
lptr = rsp->ring_ptr;
|
||||
@ -3426,7 +3466,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
|
||||
sp->fcport->vha, 0x3047,
|
||||
(uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));
|
||||
|
||||
memcpy((void *)pfxiocb, &fx_iocb,
|
||||
memcpy_toio((void __iomem *)pfxiocb, &fx_iocb,
|
||||
sizeof(struct fxdisc_entry_fx00));
|
||||
wmb();
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
@ -351,6 +351,7 @@ struct config_info_data {
|
||||
#define SOC_FABRIC_RST_CONTROL_REG 0x0020840
|
||||
#define SOC_FABRIC_CONTROL_REG 0x0020200
|
||||
#define SOC_FABRIC_CONFIG_REG 0x0020204
|
||||
#define SOC_PWR_MANAGEMENT_PWR_DOWN_REG 0x001820C
|
||||
|
||||
#define SOC_INTERRUPT_SOURCE_I_CONTROL_REG 0x0020B00
|
||||
#define SOC_CORE_TIMER_REG 0x0021850
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
@ -848,6 +848,7 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
|
||||
{
|
||||
int done = 0, timeout = 0;
|
||||
uint32_t lock_owner = 0;
|
||||
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
|
||||
|
||||
while (!done) {
|
||||
/* acquire semaphore2 from PCI HW block */
|
||||
@ -856,17 +857,21 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
|
||||
break;
|
||||
if (timeout >= qla82xx_rom_lock_timeout) {
|
||||
lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
|
||||
ql_log(ql_log_warn, vha, 0xb157,
|
||||
"%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d",
|
||||
__func__, ha->portnum, lock_owner);
|
||||
return -1;
|
||||
}
|
||||
timeout++;
|
||||
}
|
||||
qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
|
||||
qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ha->portnum);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
qla82xx_rom_unlock(struct qla_hw_data *ha)
|
||||
{
|
||||
qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, 0xffffffff);
|
||||
qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
|
||||
}
|
||||
|
||||
@ -950,6 +955,7 @@ static int
|
||||
qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
|
||||
{
|
||||
int ret, loops = 0;
|
||||
uint32_t lock_owner = 0;
|
||||
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
|
||||
|
||||
while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
|
||||
@ -958,8 +964,10 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
|
||||
loops++;
|
||||
}
|
||||
if (loops >= 50000) {
|
||||
lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
|
||||
ql_log(ql_log_fatal, vha, 0x00b9,
|
||||
"Failed to acquire SEM2 lock.\n");
|
||||
"Failed to acquire SEM2 lock, Lock Owner %u.\n",
|
||||
lock_owner);
|
||||
return -1;
|
||||
}
|
||||
ret = qla82xx_do_rom_fast_read(ha, addr, valp);
|
||||
@ -1057,6 +1065,7 @@ static int
|
||||
ql82xx_rom_lock_d(struct qla_hw_data *ha)
|
||||
{
|
||||
int loops = 0;
|
||||
uint32_t lock_owner = 0;
|
||||
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
|
||||
|
||||
while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
|
||||
@ -1065,8 +1074,9 @@ ql82xx_rom_lock_d(struct qla_hw_data *ha)
|
||||
loops++;
|
||||
}
|
||||
if (loops >= 50000) {
|
||||
lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
|
||||
ql_log(ql_log_warn, vha, 0xb010,
|
||||
"ROM lock failed.\n");
|
||||
"ROM lock failed, Lock Owner %u.\n", lock_owner);
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
@ -2811,12 +2821,14 @@ static void
|
||||
qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
|
||||
{
|
||||
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
|
||||
uint32_t lock_owner = 0;
|
||||
|
||||
if (qla82xx_rom_lock(ha))
|
||||
if (qla82xx_rom_lock(ha)) {
|
||||
lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
|
||||
/* Someone else is holding the lock. */
|
||||
ql_log(ql_log_info, vha, 0xb022,
|
||||
"Resetting rom_lock.\n");
|
||||
|
||||
"Resetting rom_lock, Lock Owner %u.\n", lock_owner);
|
||||
}
|
||||
/*
|
||||
* Either we got the lock, or someone
|
||||
* else died while holding it.
|
||||
@ -2840,47 +2852,30 @@ static int
|
||||
qla82xx_device_bootstrap(scsi_qla_host_t *vha)
|
||||
{
|
||||
int rval = QLA_SUCCESS;
|
||||
int i, timeout;
|
||||
int i;
|
||||
uint32_t old_count, count;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
int need_reset = 0, peg_stuck = 1;
|
||||
int need_reset = 0;
|
||||
|
||||
need_reset = qla82xx_need_reset(ha);
|
||||
|
||||
old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
|
||||
|
||||
for (i = 0; i < 10; i++) {
|
||||
timeout = msleep_interruptible(200);
|
||||
if (timeout) {
|
||||
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
|
||||
QLA8XXX_DEV_FAILED);
|
||||
return QLA_FUNCTION_FAILED;
|
||||
}
|
||||
|
||||
count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
|
||||
if (count != old_count)
|
||||
peg_stuck = 0;
|
||||
}
|
||||
|
||||
if (need_reset) {
|
||||
/* We are trying to perform a recovery here. */
|
||||
if (peg_stuck)
|
||||
if (ha->flags.isp82xx_fw_hung)
|
||||
qla82xx_rom_lock_recovery(ha);
|
||||
goto dev_initialize;
|
||||
} else {
|
||||
/* Start of day for this ha context. */
|
||||
if (peg_stuck) {
|
||||
/* Either we are the first or recovery in progress. */
|
||||
qla82xx_rom_lock_recovery(ha);
|
||||
goto dev_initialize;
|
||||
} else
|
||||
/* Firmware already running. */
|
||||
goto dev_ready;
|
||||
old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
|
||||
for (i = 0; i < 10; i++) {
|
||||
msleep(200);
|
||||
count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
|
||||
if (count != old_count) {
|
||||
rval = QLA_SUCCESS;
|
||||
goto dev_ready;
|
||||
}
|
||||
}
|
||||
qla82xx_rom_lock_recovery(ha);
|
||||
}
|
||||
|
||||
return rval;
|
||||
|
||||
dev_initialize:
|
||||
/* set to DEV_INITIALIZING */
|
||||
ql_log(ql_log_info, vha, 0x009e,
|
||||
"HW State: INITIALIZING.\n");
|
||||
@ -3142,18 +3137,18 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
|
||||
|
||||
if (ql2xmdenable) {
|
||||
if (!ha->fw_dumped) {
|
||||
if (fw_major_version != ha->fw_major_version ||
|
||||
if ((fw_major_version != ha->fw_major_version ||
|
||||
fw_minor_version != ha->fw_minor_version ||
|
||||
fw_subminor_version != ha->fw_subminor_version) {
|
||||
fw_subminor_version != ha->fw_subminor_version) ||
|
||||
(ha->prev_minidump_failed)) {
|
||||
ql_dbg(ql_dbg_p3p, vha, 0xb02d,
|
||||
"Firmware version differs "
|
||||
"Previous version: %d:%d:%d - "
|
||||
"New version: %d:%d:%d\n",
|
||||
"Firmware version differs Previous version: %d:%d:%d - New version: %d:%d:%d, prev_minidump_failed: %d.\n",
|
||||
fw_major_version, fw_minor_version,
|
||||
fw_subminor_version,
|
||||
ha->fw_major_version,
|
||||
ha->fw_minor_version,
|
||||
ha->fw_subminor_version);
|
||||
ha->fw_subminor_version,
|
||||
ha->prev_minidump_failed);
|
||||
/* Release MiniDump resources */
|
||||
qla82xx_md_free(vha);
|
||||
/* ALlocate MiniDump resources */
|
||||
@ -3682,8 +3677,10 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
|
||||
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
|
||||
sp = req->outstanding_cmds[cnt];
|
||||
if (sp) {
|
||||
if (!sp->u.scmd.ctx ||
|
||||
(sp->flags & SRB_FCP_CMND_DMA_VALID)) {
|
||||
if ((!sp->u.scmd.ctx ||
|
||||
(sp->flags &
|
||||
SRB_FCP_CMND_DMA_VALID)) &&
|
||||
!ha->flags.isp82xx_fw_hung) {
|
||||
spin_unlock_irqrestore(
|
||||
&ha->hardware_lock, flags);
|
||||
if (ha->isp_ops->abort_command(sp)) {
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
@ -333,9 +333,6 @@
|
||||
#define QLA82XX_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
|
||||
#define QLA82XX_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
|
||||
|
||||
/* Lock IDs for ROM lock */
|
||||
#define ROM_LOCK_DRIVER 0x0d417340
|
||||
|
||||
#define QLA82XX_PCI_CRB_WINDOWSIZE 0x00100000 /* all are 1MB windows */
|
||||
#define QLA82XX_PCI_CRB_WINDOW(A) \
|
||||
(QLA82XX_PCI_CRBSPACE + (A)*QLA82XX_PCI_CRB_WINDOWSIZE)
|
||||
@ -1186,6 +1183,7 @@ static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
|
||||
#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
|
||||
|
||||
#define qla82xx_get_temp_val(x) ((x) >> 16)
|
||||
#define qla82xx_get_temp_val1(x) ((x) && 0x0000FFFF)
|
||||
#define qla82xx_get_temp_state(x) ((x) & 0xffff)
|
||||
#define qla82xx_encode_temp(val, state) (((val) << 16) | (state))
|
||||
|
||||
|
@@ -1,17 +1,20 @@
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
* Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/

#include <linux/vmalloc.h>
#include <linux/delay.h>

#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/delay.h>

#define TIMEOUT_100_MS 100

/* 8044 Flash Read/Write functions */
uint32_t
qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
@@ -117,6 +120,95 @@ qla8044_read_write_crb_reg(struct scsi_qla_host *vha,
qla8044_wr_reg_indirect(vha, waddr, value);
}

static int
qla8044_poll_wait_for_ready(struct scsi_qla_host *vha, uint32_t addr1,
uint32_t mask)
{
unsigned long timeout;
uint32_t temp;

/* jiffies after 100ms */
timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
do {
qla8044_rd_reg_indirect(vha, addr1, &temp);
if ((temp & mask) != 0)
break;
if (time_after_eq(jiffies, timeout)) {
ql_log(ql_log_warn, vha, 0xb151,
"Error in processing rdmdio entry\n");
return -1;
}
} while (1);

return 0;
}

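The qla8044 helpers in this hunk all share one bounded-poll idiom: read a register indirectly until a mask bit comes up, or give up once a deadline built from jiffies plus TIMEOUT_100_MS expires. A minimal userspace sketch of the same idiom, assuming a hypothetical read_status() accessor in place of qla8044_rd_reg_indirect() (illustration only, not part of the commit):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-in for qla8044_rd_reg_indirect(): returns a status word. */
static uint32_t read_status(void)
{
	static uint32_t val;
	return ++val;		/* pretend the ready bit eventually comes up */
}

/* Poll until (status & mask) is non-zero, or fail after timeout_ms. */
static int poll_wait_for_ready(uint32_t mask, unsigned int timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (read_status() & mask)
			return 0;	/* ready bit observed */
		clock_gettime(CLOCK_MONOTONIC, &now);
		long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000L +
				  (now.tv_nsec - start.tv_nsec) / 1000000L;
		if (elapsed_ms >= (long)timeout_ms)
			return -1;	/* deadline passed, like the -1 above */
	}
}

int main(void)
{
	printf("poll result: %d\n", poll_wait_for_ready(0x1, 100));
	return 0;
}
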
static uint32_t
|
||||
qla8044_ipmdio_rd_reg(struct scsi_qla_host *vha,
|
||||
uint32_t addr1, uint32_t addr3, uint32_t mask, uint32_t addr)
|
||||
{
|
||||
uint32_t temp;
|
||||
int ret = 0;
|
||||
|
||||
ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
|
||||
if (ret == -1)
|
||||
return -1;
|
||||
|
||||
temp = (0x40000000 | addr);
|
||||
qla8044_wr_reg_indirect(vha, addr1, temp);
|
||||
|
||||
ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
|
||||
if (ret == -1)
|
||||
return 0;
|
||||
|
||||
qla8044_rd_reg_indirect(vha, addr3, &ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
qla8044_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *vha,
|
||||
uint32_t addr1, uint32_t addr2, uint32_t addr3, uint32_t mask)
|
||||
{
|
||||
unsigned long timeout;
|
||||
uint32_t temp;
|
||||
|
||||
/* jiffies after 100 msecs */
|
||||
timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
|
||||
do {
|
||||
temp = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr2);
|
||||
if ((temp & 0x1) != 1)
|
||||
break;
|
||||
if (time_after_eq(jiffies, timeout)) {
|
||||
ql_log(ql_log_warn, vha, 0xb152,
|
||||
"Error in processing mdiobus idle\n");
|
||||
return -1;
|
||||
}
|
||||
} while (1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
qla8044_ipmdio_wr_reg(struct scsi_qla_host *vha, uint32_t addr1,
|
||||
uint32_t addr3, uint32_t mask, uint32_t addr, uint32_t value)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
|
||||
if (ret == -1)
|
||||
return -1;
|
||||
|
||||
qla8044_wr_reg_indirect(vha, addr3, value);
|
||||
qla8044_wr_reg_indirect(vha, addr1, addr);
|
||||
|
||||
ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
|
||||
if (ret == -1)
|
||||
return -1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
/*
|
||||
* qla8044_rmw_crb_reg - Read value from raddr, AND with test_mask,
|
||||
* Shift Left,Right/OR/XOR with values RMW header and write value to waddr.
|
||||
@ -356,8 +448,8 @@ qla8044_flash_lock(scsi_qla_host_t *vha)
|
||||
lock_owner = qla8044_rd_reg(ha,
|
||||
QLA8044_FLASH_LOCK_ID);
|
||||
ql_log(ql_log_warn, vha, 0xb113,
|
||||
"%s: flash lock by %d failed, held by %d\n",
|
||||
__func__, ha->portnum, lock_owner);
|
||||
"%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d",
|
||||
__func__, ha->portnum, lock_owner);
|
||||
ret_val = QLA_FUNCTION_FAILED;
|
||||
break;
|
||||
}
|
||||
@ -1541,7 +1633,7 @@ static void
|
||||
qla8044_need_reset_handler(struct scsi_qla_host *vha)
|
||||
{
|
||||
uint32_t dev_state = 0, drv_state, drv_active;
|
||||
unsigned long reset_timeout, dev_init_timeout;
|
||||
unsigned long reset_timeout;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
|
||||
ql_log(ql_log_fatal, vha, 0xb0c2,
|
||||
@ -1555,84 +1647,78 @@ qla8044_need_reset_handler(struct scsi_qla_host *vha)
|
||||
qla8044_idc_lock(ha);
|
||||
}
|
||||
|
||||
dev_state = qla8044_rd_direct(vha,
|
||||
QLA8044_CRB_DEV_STATE_INDEX);
|
||||
drv_state = qla8044_rd_direct(vha,
|
||||
QLA8044_CRB_DRV_STATE_INDEX);
|
||||
drv_active = qla8044_rd_direct(vha,
|
||||
QLA8044_CRB_DRV_ACTIVE_INDEX);
|
||||
|
||||
ql_log(ql_log_info, vha, 0xb0c5,
|
||||
"%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
|
||||
__func__, vha->host_no, drv_state, drv_active);
|
||||
"%s(%ld): drv_state = 0x%x, drv_active = 0x%x dev_state = 0x%x\n",
|
||||
__func__, vha->host_no, drv_state, drv_active, dev_state);
|
||||
|
||||
if (!ha->flags.nic_core_reset_owner) {
|
||||
ql_dbg(ql_dbg_p3p, vha, 0xb0c3,
|
||||
"%s(%ld): reset acknowledged\n",
|
||||
__func__, vha->host_no);
|
||||
qla8044_set_rst_ready(vha);
|
||||
qla8044_set_rst_ready(vha);
|
||||
|
||||
/* Non-reset owners ACK Reset and wait for device INIT state
|
||||
* as part of Reset Recovery by Reset Owner
|
||||
*/
|
||||
dev_init_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
|
||||
/* wait for 10 seconds for reset ack from all functions */
|
||||
reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
|
||||
|
||||
do {
|
||||
if (time_after_eq(jiffies, dev_init_timeout)) {
|
||||
ql_log(ql_log_info, vha, 0xb0c4,
|
||||
"%s: Non Reset owner: Reset Ack Timeout!\n",
|
||||
__func__);
|
||||
break;
|
||||
}
|
||||
do {
|
||||
if (time_after_eq(jiffies, reset_timeout)) {
|
||||
ql_log(ql_log_info, vha, 0xb0c4,
|
||||
"%s: Function %d: Reset Ack Timeout!, drv_state: 0x%08x, drv_active: 0x%08x\n",
|
||||
__func__, ha->portnum, drv_state, drv_active);
|
||||
break;
|
||||
}
|
||||
|
||||
qla8044_idc_unlock(ha);
|
||||
msleep(1000);
|
||||
qla8044_idc_lock(ha);
|
||||
qla8044_idc_unlock(ha);
|
||||
msleep(1000);
|
||||
qla8044_idc_lock(ha);
|
||||
|
||||
dev_state = qla8044_rd_direct(vha,
|
||||
QLA8044_CRB_DEV_STATE_INDEX);
|
||||
} while (((drv_state & drv_active) != drv_active) &&
|
||||
(dev_state == QLA8XXX_DEV_NEED_RESET));
|
||||
dev_state = qla8044_rd_direct(vha,
|
||||
QLA8044_CRB_DEV_STATE_INDEX);
|
||||
drv_state = qla8044_rd_direct(vha,
|
||||
QLA8044_CRB_DRV_STATE_INDEX);
|
||||
drv_active = qla8044_rd_direct(vha,
|
||||
QLA8044_CRB_DRV_ACTIVE_INDEX);
|
||||
} while (((drv_state & drv_active) != drv_active) &&
|
||||
(dev_state == QLA8XXX_DEV_NEED_RESET));
|
||||
|
||||
/* Remove IDC participation of functions not acknowledging */
|
||||
if (drv_state != drv_active) {
|
||||
ql_log(ql_log_info, vha, 0xb0c7,
|
||||
"%s(%ld): Function %d turning off drv_active of non-acking function 0x%x\n",
|
||||
__func__, vha->host_no, ha->portnum,
|
||||
(drv_active ^ drv_state));
|
||||
drv_active = drv_active & drv_state;
|
||||
qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX,
|
||||
drv_active);
|
||||
} else {
|
||||
qla8044_set_rst_ready(vha);
|
||||
|
||||
/* wait for 10 seconds for reset ack from all functions */
|
||||
reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
|
||||
|
||||
while ((drv_state & drv_active) != drv_active) {
|
||||
if (time_after_eq(jiffies, reset_timeout)) {
|
||||
ql_log(ql_log_info, vha, 0xb0c6,
|
||||
"%s: RESET TIMEOUT!"
|
||||
"drv_state: 0x%08x, drv_active: 0x%08x\n",
|
||||
QLA2XXX_DRIVER_NAME, drv_state, drv_active);
|
||||
break;
|
||||
}
|
||||
|
||||
qla8044_idc_unlock(ha);
|
||||
msleep(1000);
|
||||
qla8044_idc_lock(ha);
|
||||
|
||||
drv_state = qla8044_rd_direct(vha,
|
||||
QLA8044_CRB_DRV_STATE_INDEX);
|
||||
drv_active = qla8044_rd_direct(vha,
|
||||
QLA8044_CRB_DRV_ACTIVE_INDEX);
|
||||
}
|
||||
|
||||
if (drv_state != drv_active) {
|
||||
ql_log(ql_log_info, vha, 0xb0c7,
|
||||
"%s(%ld): Reset_owner turning off drv_active "
|
||||
"of non-acking function 0x%x\n", __func__,
|
||||
vha->host_no, (drv_active ^ drv_state));
|
||||
drv_active = drv_active & drv_state;
|
||||
qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX,
|
||||
drv_active);
|
||||
}
|
||||
|
||||
/*
|
||||
* Clear RESET OWNER, will be set at next reset
|
||||
* by next RST_OWNER
|
||||
*/
|
||||
ha->flags.nic_core_reset_owner = 0;
|
||||
* Reset owner should execute reset recovery,
|
||||
* if all functions acknowledged
|
||||
*/
|
||||
if ((ha->flags.nic_core_reset_owner) &&
|
||||
(dev_state == QLA8XXX_DEV_NEED_RESET)) {
|
||||
ha->flags.nic_core_reset_owner = 0;
|
||||
qla8044_device_bootstrap(vha);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/* Start Reset Recovery */
|
||||
/* Exit if non active function */
|
||||
if (!(drv_active & (1 << ha->portnum))) {
|
||||
ha->flags.nic_core_reset_owner = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Execute Reset Recovery if Reset Owner or Function 7
|
||||
* is the only active function
|
||||
*/
|
||||
if (ha->flags.nic_core_reset_owner ||
|
||||
((drv_state & drv_active) == QLA8044_FUN7_ACTIVE_INDEX)) {
|
||||
ha->flags.nic_core_reset_owner = 0;
|
||||
qla8044_device_bootstrap(vha);
|
||||
}
|
||||
}
|
||||
@ -1655,6 +1741,19 @@ qla8044_set_drv_active(struct scsi_qla_host *vha)
|
||||
qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
|
||||
}
|
||||
|
||||
static int
|
||||
qla8044_check_drv_active(struct scsi_qla_host *vha)
|
||||
{
|
||||
uint32_t drv_active;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
|
||||
drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
|
||||
if (drv_active & (1 << ha->portnum))
|
||||
return QLA_SUCCESS;
|
||||
else
|
||||
return QLA_TEST_FAILED;
|
||||
}
|
||||
|
||||
static void
|
||||
qla8044_clear_idc_dontreset(struct scsi_qla_host *vha)
|
||||
{
|
||||
@@ -1837,14 +1936,16 @@ qla8044_device_state_handler(struct scsi_qla_host *vha)

while (1) {
if (time_after_eq(jiffies, dev_init_timeout)) {
ql_log(ql_log_warn, vha, 0xb0cf,
"%s: Device Init Failed 0x%x = %s\n",
QLA2XXX_DRIVER_NAME, dev_state,
dev_state < MAX_STATES ?
qdev_state(dev_state) : "Unknown");

qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
QLA8XXX_DEV_FAILED);
if (qla8044_check_drv_active(vha) == QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0xb0cf,
"%s: Device Init Failed 0x%x = %s\n",
QLA2XXX_DRIVER_NAME, dev_state,
dev_state < MAX_STATES ?
qdev_state(dev_state) : "Unknown");
qla8044_wr_direct(vha,
QLA8044_CRB_DEV_STATE_INDEX,
QLA8XXX_DEV_FAILED);
}
}

dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
@ -2017,6 +2118,13 @@ qla8044_watchdog(struct scsi_qla_host *vha)
|
||||
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) {
|
||||
dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
|
||||
|
||||
if (qla8044_check_fw_alive(vha)) {
|
||||
ha->flags.isp82xx_fw_hung = 1;
|
||||
ql_log(ql_log_warn, vha, 0xb10a,
|
||||
"Firmware hung.\n");
|
||||
qla82xx_clear_pending_mbx(vha);
|
||||
}
|
||||
|
||||
if (qla8044_check_temp(vha)) {
|
||||
set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
|
||||
ha->flags.isp82xx_fw_hung = 1;
|
||||
@ -2037,7 +2145,7 @@ qla8044_watchdog(struct scsi_qla_host *vha)
|
||||
qla2xxx_wake_dpc(vha);
|
||||
} else {
|
||||
/* Check firmware health */
|
||||
if (qla8044_check_fw_alive(vha)) {
|
||||
if (ha->flags.isp82xx_fw_hung) {
|
||||
halt_status = qla8044_rd_direct(vha,
|
||||
QLA8044_PEG_HALT_STATUS1_INDEX);
|
||||
if (halt_status &
|
||||
@ -2073,12 +2181,8 @@ qla8044_watchdog(struct scsi_qla_host *vha)
|
||||
__func__);
|
||||
set_bit(ISP_ABORT_NEEDED,
|
||||
&vha->dpc_flags);
|
||||
qla82xx_clear_pending_mbx(vha);
|
||||
}
|
||||
}
|
||||
ha->flags.isp82xx_fw_hung = 1;
|
||||
ql_log(ql_log_warn, vha, 0xb10a,
|
||||
"Firmware hung.\n");
|
||||
qla2xxx_wake_dpc(vha);
|
||||
}
|
||||
}
|
||||
@ -2286,8 +2390,6 @@ qla8044_minidump_process_rdmem(struct scsi_qla_host *vha,
|
||||
}
|
||||
|
||||
if (j >= MAX_CTL_CHECK) {
|
||||
printk_ratelimited(KERN_ERR
|
||||
"%s: failed to read through agent\n", __func__);
|
||||
write_unlock_irqrestore(&ha->hw_lock, flags);
|
||||
return QLA_SUCCESS;
|
||||
}
|
||||
@ -2882,6 +2984,231 @@ error_exit:
|
||||
return rval;
|
||||
}
|
||||
|
||||
static uint32_t
|
||||
qla8044_minidump_process_rddfe(struct scsi_qla_host *vha,
|
||||
struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
|
||||
{
|
||||
int loop_cnt;
|
||||
uint32_t addr1, addr2, value, data, temp, wrVal;
|
||||
uint8_t stride, stride2;
|
||||
uint16_t count;
|
||||
uint32_t poll, mask, data_size, modify_mask;
|
||||
uint32_t wait_count = 0;
|
||||
|
||||
uint32_t *data_ptr = *d_ptr;
|
||||
|
||||
struct qla8044_minidump_entry_rddfe *rddfe;
|
||||
rddfe = (struct qla8044_minidump_entry_rddfe *) entry_hdr;
|
||||
|
||||
addr1 = rddfe->addr_1;
|
||||
value = rddfe->value;
|
||||
stride = rddfe->stride;
|
||||
stride2 = rddfe->stride2;
|
||||
count = rddfe->count;
|
||||
|
||||
poll = rddfe->poll;
|
||||
mask = rddfe->mask;
|
||||
modify_mask = rddfe->modify_mask;
|
||||
data_size = rddfe->data_size;
|
||||
|
||||
addr2 = addr1 + stride;
|
||||
|
||||
for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) {
|
||||
qla8044_wr_reg_indirect(vha, addr1, (0x40000000 | value));
|
||||
|
||||
wait_count = 0;
|
||||
while (wait_count < poll) {
|
||||
qla8044_rd_reg_indirect(vha, addr1, &temp);
|
||||
if ((temp & mask) != 0)
|
||||
break;
|
||||
wait_count++;
|
||||
}
|
||||
|
||||
if (wait_count == poll) {
|
||||
ql_log(ql_log_warn, vha, 0xb153,
|
||||
"%s: TIMEOUT\n", __func__);
|
||||
goto error;
|
||||
} else {
|
||||
qla8044_rd_reg_indirect(vha, addr2, &temp);
|
||||
temp = temp & modify_mask;
|
||||
temp = (temp | ((loop_cnt << 16) | loop_cnt));
|
||||
wrVal = ((temp << 16) | temp);
|
||||
|
||||
qla8044_wr_reg_indirect(vha, addr2, wrVal);
|
||||
qla8044_wr_reg_indirect(vha, addr1, value);
|
||||
|
||||
wait_count = 0;
|
||||
while (wait_count < poll) {
|
||||
qla8044_rd_reg_indirect(vha, addr1, &temp);
|
||||
if ((temp & mask) != 0)
|
||||
break;
|
||||
wait_count++;
|
||||
}
|
||||
if (wait_count == poll) {
|
||||
ql_log(ql_log_warn, vha, 0xb154,
|
||||
"%s: TIMEOUT\n", __func__);
|
||||
goto error;
|
||||
}
|
||||
|
||||
qla8044_wr_reg_indirect(vha, addr1,
|
||||
((0x40000000 | value) + stride2));
|
||||
wait_count = 0;
|
||||
while (wait_count < poll) {
|
||||
qla8044_rd_reg_indirect(vha, addr1, &temp);
|
||||
if ((temp & mask) != 0)
|
||||
break;
|
||||
wait_count++;
|
||||
}
|
||||
|
||||
if (wait_count == poll) {
|
||||
ql_log(ql_log_warn, vha, 0xb155,
|
||||
"%s: TIMEOUT\n", __func__);
|
||||
goto error;
|
||||
}
|
||||
|
||||
qla8044_rd_reg_indirect(vha, addr2, &data);
|
||||
|
||||
*data_ptr++ = wrVal;
|
||||
*data_ptr++ = data;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
*d_ptr = data_ptr;
|
||||
return QLA_SUCCESS;
|
||||
|
||||
error:
|
||||
return -1;
|
||||
|
||||
}
|
||||
|
||||
static uint32_t
|
||||
qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha,
|
||||
struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
|
||||
{
|
||||
int ret = 0;
|
||||
uint32_t addr1, addr2, value1, value2, data, selVal;
|
||||
uint8_t stride1, stride2;
|
||||
uint32_t addr3, addr4, addr5, addr6, addr7;
|
||||
uint16_t count, loop_cnt;
|
||||
uint32_t poll, mask;
|
||||
uint32_t *data_ptr = *d_ptr;
|
||||
|
||||
struct qla8044_minidump_entry_rdmdio *rdmdio;
|
||||
|
||||
rdmdio = (struct qla8044_minidump_entry_rdmdio *) entry_hdr;
|
||||
|
||||
addr1 = rdmdio->addr_1;
|
||||
addr2 = rdmdio->addr_2;
|
||||
value1 = rdmdio->value_1;
|
||||
stride1 = rdmdio->stride_1;
|
||||
stride2 = rdmdio->stride_2;
|
||||
count = rdmdio->count;
|
||||
|
||||
poll = rdmdio->poll;
|
||||
mask = rdmdio->mask;
|
||||
value2 = rdmdio->value_2;
|
||||
|
||||
addr3 = addr1 + stride1;
|
||||
|
||||
for (loop_cnt = 0; loop_cnt < count; loop_cnt++) {
|
||||
ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2,
|
||||
addr3, mask);
|
||||
if (ret == -1)
|
||||
goto error;
|
||||
|
||||
addr4 = addr2 - stride1;
|
||||
ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr4,
|
||||
value2);
|
||||
if (ret == -1)
|
||||
goto error;
|
||||
|
||||
addr5 = addr2 - (2 * stride1);
|
||||
ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr5,
|
||||
value1);
|
||||
if (ret == -1)
|
||||
goto error;
|
||||
|
||||
addr6 = addr2 - (3 * stride1);
|
||||
ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask,
|
||||
addr6, 0x2);
|
||||
if (ret == -1)
|
||||
goto error;
|
||||
|
||||
ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2,
|
||||
addr3, mask);
|
||||
if (ret == -1)
|
||||
goto error;
|
||||
|
||||
addr7 = addr2 - (4 * stride1);
|
||||
data = qla8044_ipmdio_rd_reg(vha, addr1, addr3,
|
||||
mask, addr7);
|
||||
if (data == -1)
|
||||
goto error;
|
||||
|
||||
selVal = (value2 << 18) | (value1 << 2) | 2;
|
||||
|
||||
stride2 = rdmdio->stride_2;
|
||||
*data_ptr++ = selVal;
|
||||
*data_ptr++ = data;
|
||||
|
||||
value1 = value1 + stride2;
|
||||
*d_ptr = data_ptr;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
error:
|
||||
return -1;
|
||||
}
|
||||
|
||||
static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha,
|
||||
struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
|
||||
{
|
||||
uint32_t addr1, addr2, value1, value2, poll, mask, r_value;
|
||||
uint32_t wait_count = 0;
|
||||
struct qla8044_minidump_entry_pollwr *pollwr_hdr;
|
||||
|
||||
pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr;
|
||||
addr1 = pollwr_hdr->addr_1;
|
||||
addr2 = pollwr_hdr->addr_2;
|
||||
value1 = pollwr_hdr->value_1;
|
||||
value2 = pollwr_hdr->value_2;
|
||||
|
||||
poll = pollwr_hdr->poll;
|
||||
mask = pollwr_hdr->mask;
|
||||
|
||||
while (wait_count < poll) {
|
||||
qla8044_rd_reg_indirect(vha, addr1, &r_value);
|
||||
|
||||
if ((r_value & poll) != 0)
|
||||
break;
|
||||
wait_count++;
|
||||
}
|
||||
|
||||
if (wait_count == poll) {
|
||||
ql_log(ql_log_warn, vha, 0xb156, "%s: TIMEOUT\n", __func__);
|
||||
goto error;
|
||||
}
|
||||
|
||||
qla8044_wr_reg_indirect(vha, addr2, value2);
|
||||
qla8044_wr_reg_indirect(vha, addr1, value1);
|
||||
|
||||
wait_count = 0;
|
||||
while (wait_count < poll) {
|
||||
qla8044_rd_reg_indirect(vha, addr1, &r_value);
|
||||
|
||||
if ((r_value & poll) != 0)
|
||||
break;
|
||||
wait_count++;
|
||||
}
|
||||
|
||||
return QLA_SUCCESS;
|
||||
|
||||
error:
|
||||
return -1;
|
||||
}
|
||||
|
||||
/*
|
||||
*
|
||||
* qla8044_collect_md_data - Retrieve firmware minidump data.
|
||||
@ -3089,6 +3416,24 @@ qla8044_collect_md_data(struct scsi_qla_host *vha)
|
||||
if (rval != QLA_SUCCESS)
|
||||
qla8044_mark_entry_skipped(vha, entry_hdr, i);
|
||||
break;
|
||||
case QLA8044_RDDFE:
|
||||
rval = qla8044_minidump_process_rddfe(vha, entry_hdr,
|
||||
&data_ptr);
|
||||
if (rval != QLA_SUCCESS)
|
||||
qla8044_mark_entry_skipped(vha, entry_hdr, i);
|
||||
break;
|
||||
case QLA8044_RDMDIO:
|
||||
rval = qla8044_minidump_process_rdmdio(vha, entry_hdr,
|
||||
&data_ptr);
|
||||
if (rval != QLA_SUCCESS)
|
||||
qla8044_mark_entry_skipped(vha, entry_hdr, i);
|
||||
break;
|
||||
case QLA8044_POLLWR:
|
||||
rval = qla8044_minidump_process_pollwr(vha, entry_hdr,
|
||||
&data_ptr);
|
||||
if (rval != QLA_SUCCESS)
|
||||
qla8044_mark_entry_skipped(vha, entry_hdr, i);
|
||||
break;
|
||||
case QLA82XX_RDNOP:
|
||||
default:
|
||||
qla8044_mark_entry_skipped(vha, entry_hdr, i);
|
||||
@ -3110,6 +3455,7 @@ skip_nxt_entry:
|
||||
"Dump data mismatch: Data collected: "
|
||||
"[0x%x], total_data_size:[0x%x]\n",
|
||||
data_collected, ha->md_dump_size);
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
goto md_failed;
|
||||
}
|
||||
|
||||
@ -3134,10 +3480,12 @@ qla8044_get_minidump(struct scsi_qla_host *vha)
|
||||
|
||||
if (!qla8044_collect_md_data(vha)) {
|
||||
ha->fw_dumped = 1;
|
||||
ha->prev_minidump_failed = 0;
|
||||
} else {
|
||||
ql_log(ql_log_fatal, vha, 0xb0db,
|
||||
"%s: Unable to collect minidump\n",
|
||||
__func__);
|
||||
ha->prev_minidump_failed = 1;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
@ -133,6 +133,7 @@
|
||||
#define QLA8044_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4))
|
||||
#define QLA8044_MAX_LINK_SPEED(f) (0x36F0+(((f) / 4) * 4))
|
||||
#define QLA8044_LINK_SPEED_FACTOR 10
|
||||
#define QLA8044_FUN7_ACTIVE_INDEX 0x80
|
||||
|
||||
/* FLASH API Defines */
|
||||
#define QLA8044_FLASH_MAX_WAIT_USEC 100
|
||||
@ -431,6 +432,50 @@ struct qla8044_minidump_entry_pollrd {
|
||||
uint32_t rsvd_1;
|
||||
} __packed;
|
||||
|
||||
struct qla8044_minidump_entry_rddfe {
|
||||
struct qla8044_minidump_entry_hdr h;
|
||||
uint32_t addr_1;
|
||||
uint32_t value;
|
||||
uint8_t stride;
|
||||
uint8_t stride2;
|
||||
uint16_t count;
|
||||
uint32_t poll;
|
||||
uint32_t mask;
|
||||
uint32_t modify_mask;
|
||||
uint32_t data_size;
|
||||
uint32_t rsvd;
|
||||
|
||||
} __packed;
|
||||
|
||||
struct qla8044_minidump_entry_rdmdio {
|
||||
struct qla8044_minidump_entry_hdr h;
|
||||
|
||||
uint32_t addr_1;
|
||||
uint32_t addr_2;
|
||||
uint32_t value_1;
|
||||
uint8_t stride_1;
|
||||
uint8_t stride_2;
|
||||
uint16_t count;
|
||||
uint32_t poll;
|
||||
uint32_t mask;
|
||||
uint32_t value_2;
|
||||
uint32_t data_size;
|
||||
|
||||
} __packed;
|
||||
|
||||
struct qla8044_minidump_entry_pollwr {
|
||||
struct qla8044_minidump_entry_hdr h;
|
||||
uint32_t addr_1;
|
||||
uint32_t addr_2;
|
||||
uint32_t value_1;
|
||||
uint32_t value_2;
|
||||
uint32_t poll;
|
||||
uint32_t mask;
|
||||
uint32_t data_size;
|
||||
uint32_t rsvd;
|
||||
|
||||
} __packed;
|
||||
|
||||
/* RDMUX2 Entry */
|
||||
struct qla8044_minidump_entry_rdmux2 {
|
||||
struct qla8044_minidump_entry_hdr h;
|
||||
@ -516,6 +561,9 @@ static const uint32_t qla8044_reg_tbl[] = {
|
||||
#define QLA8044_DBG_RSVD_ARRAY_LEN 8
|
||||
#define QLA8044_DBG_OCM_WNDREG_ARRAY_LEN 16
|
||||
#define QLA8044_SS_PCI_INDEX 0
|
||||
#define QLA8044_RDDFE 38
|
||||
#define QLA8044_RDMDIO 39
|
||||
#define QLA8044_POLLWR 40
|
||||
|
||||
struct qla8044_minidump_template_hdr {
|
||||
uint32_t entry_type;
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
@ -616,7 +616,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
|
||||
|
||||
if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
|
||||
/* List assured to be having elements */
|
||||
qla2x00_clean_dsd_pool(ha, sp);
|
||||
qla2x00_clean_dsd_pool(ha, sp, NULL);
|
||||
sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
|
||||
}
|
||||
|
||||
@ -781,7 +781,7 @@ static int
|
||||
qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
|
||||
{
|
||||
#define ABORT_POLLING_PERIOD 1000
|
||||
#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD))
|
||||
#define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD))
|
||||
unsigned long wait_iter = ABORT_WAIT_ITER;
|
||||
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
@ -844,11 +844,8 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
|
||||
}
|
||||
|
||||
/*
|
||||
* qla2x00_wait_for_reset_ready
|
||||
* Wait till the HBA is online after going through
|
||||
* <= MAX_RETRIES_OF_ISP_ABORT or
|
||||
* finally HBA is disabled ie marked offline or flash
|
||||
* operations are in progress.
|
||||
* qla2x00_wait_for_hba_ready
|
||||
* Wait till the HBA is ready before doing driver unload
|
||||
*
|
||||
* Input:
|
||||
* ha - pointer to host adapter structure
|
||||
@ -857,35 +854,15 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
|
||||
* Does context switching-Release SPIN_LOCK
|
||||
* (if any) before calling this routine.
|
||||
*
|
||||
* Return:
|
||||
* Success (Adapter is online/no flash ops) : 0
|
||||
* Failed (Adapter is offline/disabled/flash ops in progress) : 1
|
||||
*/
|
||||
static int
|
||||
qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha)
|
||||
static void
|
||||
qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
|
||||
{
|
||||
int return_status;
|
||||
unsigned long wait_online;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
|
||||
|
||||
wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
|
||||
while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
|
||||
test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
|
||||
test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
|
||||
ha->optrom_state != QLA_SWAITING ||
|
||||
ha->dpc_active) && time_before(jiffies, wait_online))
|
||||
while ((!(vha->flags.online) || ha->dpc_active ||
|
||||
ha->flags.mbox_busy))
|
||||
msleep(1000);
|
||||
|
||||
if (base_vha->flags.online && ha->optrom_state == QLA_SWAITING)
|
||||
return_status = QLA_SUCCESS;
|
||||
else
|
||||
return_status = QLA_FUNCTION_FAILED;
|
||||
|
||||
ql_dbg(ql_dbg_taskm, vha, 0x8019,
|
||||
"%s return status=%d.\n", __func__, return_status);
|
||||
|
||||
return return_status;
|
||||
}
|
||||
|
||||
int
|
||||
@ -945,7 +922,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
|
||||
int ret;
|
||||
unsigned int id, lun;
|
||||
unsigned long flags;
|
||||
int wait = 0;
|
||||
int rval, wait = 0;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
|
||||
if (!CMD_SP(cmd))
|
||||
@ -974,10 +951,20 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
|
||||
sp_get(sp);
|
||||
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
if (ha->isp_ops->abort_command(sp)) {
|
||||
ret = FAILED;
|
||||
rval = ha->isp_ops->abort_command(sp);
|
||||
if (rval) {
|
||||
if (rval == QLA_FUNCTION_PARAMETER_ERROR) {
|
||||
/*
|
||||
* Decrement the ref_count since we can't find the
|
||||
* command
|
||||
*/
|
||||
atomic_dec(&sp->ref_count);
|
||||
ret = SUCCESS;
|
||||
} else
|
||||
ret = FAILED;
|
||||
|
||||
ql_dbg(ql_dbg_taskm, vha, 0x8003,
|
||||
"Abort command mbx failed cmd=%p.\n", cmd);
|
||||
"Abort command mbx failed cmd=%p, rval=%x.\n", cmd, rval);
|
||||
} else {
|
||||
ql_dbg(ql_dbg_taskm, vha, 0x8004,
|
||||
"Abort command mbx success cmd=%p.\n", cmd);
|
||||
@ -985,6 +972,12 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
/*
* Clear the slot in the outstanding_cmds array if we can't find the
* command to reclaim the resources.
*/
if (rval == QLA_FUNCTION_PARAMETER_ERROR)
|
||||
vha->req->outstanding_cmds[sp->handle] = NULL;
|
||||
sp->done(ha, sp, 0);
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
|
||||
@ -1236,7 +1229,11 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
|
||||
ql_log(ql_log_info, vha, 0x8018,
|
||||
"ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
|
||||
|
||||
if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
|
||||
/*
|
||||
* No point in issuing another reset if one is active. Also do not
|
||||
* attempt a reset if we are updating flash.
|
||||
*/
|
||||
if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)
|
||||
goto eh_host_reset_lock;
|
||||
|
||||
if (vha != base_vha) {
|
||||
@ -2270,6 +2267,13 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
|
||||
ha->device_type |= DT_IIDMA;
|
||||
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
|
||||
break;
|
||||
case PCI_DEVICE_ID_QLOGIC_ISP2271:
|
||||
ha->device_type |= DT_ISP2271;
|
||||
ha->device_type |= DT_ZIO_SUPPORTED;
|
||||
ha->device_type |= DT_FWI2;
|
||||
ha->device_type |= DT_IIDMA;
|
||||
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
|
||||
break;
|
||||
}
|
||||
|
||||
if (IS_QLA82XX(ha))
|
||||
@ -2346,7 +2350,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
|
||||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
|
||||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
|
||||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071) {
|
||||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
|
||||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271) {
|
||||
bars = pci_select_bars(pdev, IORESOURCE_MEM);
|
||||
mem_only = 1;
|
||||
ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
|
||||
@ -2877,6 +2882,7 @@ skip_dpc:
|
||||
|
||||
base_vha->flags.init_done = 1;
|
||||
base_vha->flags.online = 1;
|
||||
ha->prev_minidump_failed = 0;
|
||||
|
||||
ql_dbg(ql_dbg_init, base_vha, 0x00f2,
|
||||
"Init done and hba is online.\n");
|
||||
@ -3136,6 +3142,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
|
||||
base_vha = pci_get_drvdata(pdev);
|
||||
ha = base_vha->hw;
|
||||
|
||||
qla2x00_wait_for_hba_ready(base_vha);
|
||||
|
||||
set_bit(UNLOADING, &base_vha->dpc_flags);
|
||||
|
||||
if (IS_QLAFX00(ha))
|
||||
@ -3645,6 +3653,7 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha)
|
||||
ha->eft = NULL;
|
||||
ha->eft_dma = 0;
|
||||
ha->fw_dumped = 0;
|
||||
ha->fw_dump_cap_flags = 0;
|
||||
ha->fw_dump_reading = 0;
|
||||
ha->fw_dump = NULL;
|
||||
ha->fw_dump_len = 0;
|
||||
@ -4913,12 +4922,13 @@ qla2x00_do_dpc(void *data)
|
||||
if (qlafx00_reset_initialize(base_vha)) {
|
||||
/* Failed. Abort isp later. */
|
||||
if (!test_bit(UNLOADING,
|
||||
&base_vha->dpc_flags))
|
||||
&base_vha->dpc_flags)) {
|
||||
set_bit(ISP_UNRECOVERABLE,
|
||||
&base_vha->dpc_flags);
|
||||
ql_dbg(ql_dbg_dpc, base_vha,
|
||||
0x4021,
|
||||
"Reset Recovery Failed\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -5077,8 +5087,10 @@ intr_on_check:
|
||||
ha->isp_ops->enable_intrs(ha);
|
||||
|
||||
if (test_and_clear_bit(BEACON_BLINK_NEEDED,
|
||||
&base_vha->dpc_flags))
|
||||
ha->isp_ops->beacon_blink(base_vha);
|
||||
&base_vha->dpc_flags)) {
|
||||
if (ha->beacon_blink_led == 1)
|
||||
ha->isp_ops->beacon_blink(base_vha);
|
||||
}
|
||||
|
||||
if (!IS_QLAFX00(ha))
|
||||
qla2x00_do_dpc_all_vps(base_vha);
|
||||
@ -5325,7 +5337,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
|
||||
#define FW_ISP82XX 7
|
||||
#define FW_ISP2031 8
|
||||
#define FW_ISP8031 9
|
||||
#define FW_ISP2071 10
|
||||
#define FW_ISP27XX 10
|
||||
|
||||
#define FW_FILE_ISP21XX "ql2100_fw.bin"
|
||||
#define FW_FILE_ISP22XX "ql2200_fw.bin"
|
||||
@ -5337,7 +5349,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
|
||||
#define FW_FILE_ISP82XX "ql8200_fw.bin"
|
||||
#define FW_FILE_ISP2031 "ql2600_fw.bin"
|
||||
#define FW_FILE_ISP8031 "ql8300_fw.bin"
|
||||
#define FW_FILE_ISP2071 "ql2700_fw.bin"
|
||||
#define FW_FILE_ISP27XX "ql2700_fw.bin"
|
||||
|
||||
|
||||
static DEFINE_MUTEX(qla_fw_lock);
|
||||
@ -5353,7 +5365,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
|
||||
{ .name = FW_FILE_ISP82XX, },
|
||||
{ .name = FW_FILE_ISP2031, },
|
||||
{ .name = FW_FILE_ISP8031, },
|
||||
{ .name = FW_FILE_ISP2071, },
|
||||
{ .name = FW_FILE_ISP27XX, },
|
||||
};
|
||||
|
||||
struct fw_blob *
|
||||
@ -5382,8 +5394,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
|
||||
blob = &qla_fw_blobs[FW_ISP2031];
|
||||
} else if (IS_QLA8031(ha)) {
|
||||
blob = &qla_fw_blobs[FW_ISP8031];
|
||||
} else if (IS_QLA2071(ha)) {
|
||||
blob = &qla_fw_blobs[FW_ISP2071];
|
||||
} else if (IS_QLA27XX(ha)) {
|
||||
blob = &qla_fw_blobs[FW_ISP27XX];
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
@ -5714,6 +5726,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
|
||||
{ 0 },
|
||||
};
|
||||
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
@ -1727,11 +1727,8 @@ qla83xx_beacon_blink(struct scsi_qla_host *vha)
|
||||
if (IS_QLA2031(ha)) {
|
||||
led_select_value = qla83xx_select_led_port(ha);
|
||||
|
||||
qla83xx_wr_reg(vha, led_select_value, 0x40002000);
|
||||
qla83xx_wr_reg(vha, led_select_value + 4, 0x40002000);
|
||||
msleep(1000);
|
||||
qla83xx_wr_reg(vha, led_select_value, 0x40004000);
|
||||
qla83xx_wr_reg(vha, led_select_value + 4, 0x40004000);
|
||||
qla83xx_wr_reg(vha, led_select_value, 0x40000230);
|
||||
qla83xx_wr_reg(vha, led_select_value + 4, 0x40000230);
|
||||
} else if (IS_QLA8031(ha)) {
|
||||
led_select_value = qla83xx_select_led_port(ha);
|
||||
|
||||
|
@ -182,6 +182,11 @@ struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
|
||||
void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
|
||||
struct atio_from_isp *atio)
|
||||
{
|
||||
ql_dbg(ql_dbg_tgt, vha, 0xe072,
|
||||
"%s: qla_target(%d): type %x ox_id %04x\n",
|
||||
__func__, vha->vp_idx, atio->u.raw.entry_type,
|
||||
be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
|
||||
|
||||
switch (atio->u.raw.entry_type) {
|
||||
case ATIO_TYPE7:
|
||||
{
|
||||
@ -236,6 +241,10 @@ void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
|
||||
void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
|
||||
{
|
||||
switch (pkt->entry_type) {
|
||||
case CTIO_CRC2:
|
||||
ql_dbg(ql_dbg_tgt, vha, 0xe073,
|
||||
"qla_target(%d):%s: CRC2 Response pkt\n",
|
||||
vha->vp_idx, __func__);
|
||||
case CTIO_TYPE7:
|
||||
{
|
||||
struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
|
||||
@ -1350,13 +1359,42 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
|
||||
|
||||
prm->cmd->sg_mapped = 1;
|
||||
|
||||
/*
|
||||
* If greater than four sg entries then we need to allocate
|
||||
* the continuation entries
|
||||
*/
|
||||
if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
|
||||
prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
|
||||
prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);
|
||||
if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
|
||||
/*
|
||||
* If greater than four sg entries then we need to allocate
|
||||
* the continuation entries
|
||||
*/
|
||||
if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
|
||||
prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
|
||||
prm->tgt->datasegs_per_cmd,
|
||||
prm->tgt->datasegs_per_cont);
|
||||
} else {
|
||||
/* DIF */
|
||||
if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
|
||||
(cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
|
||||
prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
|
||||
prm->tot_dsds = prm->seg_cnt;
|
||||
} else
|
||||
prm->tot_dsds = prm->seg_cnt;
|
||||
|
||||
if (cmd->prot_sg_cnt) {
|
||||
prm->prot_sg = cmd->prot_sg;
|
||||
prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
|
||||
cmd->prot_sg, cmd->prot_sg_cnt,
|
||||
cmd->dma_data_direction);
|
||||
if (unlikely(prm->prot_seg_cnt == 0))
|
||||
goto out_err;
|
||||
|
||||
if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
|
||||
(cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
|
||||
/* Dif Bundling not support here */
|
||||
prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
|
||||
cmd->blk_sz);
|
||||
prm->tot_dsds += prm->prot_seg_cnt;
|
||||
} else
|
||||
prm->tot_dsds += prm->prot_seg_cnt;
|
||||
}
|
||||
}
|
||||
|
||||
ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
|
||||
prm->seg_cnt, prm->req_cnt);
|
||||
@ -1377,6 +1415,16 @@ static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
|
||||
BUG_ON(!cmd->sg_mapped);
|
||||
pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
|
||||
cmd->sg_mapped = 0;
|
||||
|
||||
if (cmd->prot_sg_cnt)
|
||||
pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
|
||||
cmd->dma_data_direction);
|
||||
|
||||
if (cmd->ctx_dsd_alloced)
|
||||
qla2x00_clean_dsd_pool(ha, NULL, cmd);
|
||||
|
||||
if (cmd->ctx)
|
||||
dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
|
||||
}
|
||||
|
||||
static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
|
||||
@ -1665,8 +1713,9 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
|
||||
return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
|
||||
}
|
||||
|
||||
ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
|
||||
vha->vp_idx, cmd->tag);
|
||||
ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u ox_id %04x\n",
|
||||
vha->vp_idx, cmd->tag,
|
||||
be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
|
||||
|
||||
prm->cmd = cmd;
|
||||
prm->tgt = tgt;
|
||||
@ -1902,6 +1951,323 @@ skip_explict_conf:
|
||||
/* Sense with len > 24, is it possible ??? */
|
||||
}
|
||||
|
||||
|
||||
|
||||
/* diff */
|
||||
static inline int
|
||||
qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
|
||||
{
|
||||
/*
|
||||
* Uncomment when corresponding SCSI changes are done.
|
||||
*
|
||||
if (!sp->cmd->prot_chk)
|
||||
return 0;
|
||||
*
|
||||
*/
|
||||
switch (se_cmd->prot_op) {
|
||||
case TARGET_PROT_DOUT_INSERT:
|
||||
case TARGET_PROT_DIN_STRIP:
|
||||
if (ql2xenablehba_err_chk >= 1)
|
||||
return 1;
|
||||
break;
|
||||
case TARGET_PROT_DOUT_PASS:
|
||||
case TARGET_PROT_DIN_PASS:
|
||||
if (ql2xenablehba_err_chk >= 2)
|
||||
return 1;
|
||||
break;
|
||||
case TARGET_PROT_DIN_INSERT:
|
||||
case TARGET_PROT_DOUT_STRIP:
|
||||
return 1;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
|
||||
*
|
||||
*/
|
||||
static inline void
|
||||
qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
|
||||
{
|
||||
uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
|
||||
|
||||
/* Wait till Mode Sense/Select cmd, modepage Ah, subpage 2
* have been implemented by TCM, before AppTag is available.
* Look for modesense_handlers[]
*/
ctx->app_tag = __constant_cpu_to_le16(0);
|
||||
ctx->app_tag_mask[0] = 0x0;
|
||||
ctx->app_tag_mask[1] = 0x0;
|
||||
|
||||
switch (se_cmd->prot_type) {
|
||||
case TARGET_DIF_TYPE0_PROT:
|
||||
/*
|
||||
* No check for ql2xenablehba_err_chk, as it would be an
|
||||
* I/O error if hba tag generation is not done.
|
||||
*/
|
||||
ctx->ref_tag = cpu_to_le32(lba);
|
||||
|
||||
if (!qlt_hba_err_chk_enabled(se_cmd))
|
||||
break;
|
||||
|
||||
/* enable ALL bytes of the ref tag */
|
||||
ctx->ref_tag_mask[0] = 0xff;
|
||||
ctx->ref_tag_mask[1] = 0xff;
|
||||
ctx->ref_tag_mask[2] = 0xff;
|
||||
ctx->ref_tag_mask[3] = 0xff;
|
||||
break;
|
||||
/*
|
||||
* For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
* 16 bit app tag.
|
||||
*/
|
||||
case TARGET_DIF_TYPE1_PROT:
|
||||
ctx->ref_tag = cpu_to_le32(lba);
|
||||
|
||||
if (!qlt_hba_err_chk_enabled(se_cmd))
|
||||
break;
|
||||
|
||||
/* enable ALL bytes of the ref tag */
|
||||
ctx->ref_tag_mask[0] = 0xff;
|
||||
ctx->ref_tag_mask[1] = 0xff;
|
||||
ctx->ref_tag_mask[2] = 0xff;
|
||||
ctx->ref_tag_mask[3] = 0xff;
|
||||
break;
|
||||
/*
|
||||
* For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
|
||||
* match LBA in CDB + N
|
||||
*/
|
||||
case TARGET_DIF_TYPE2_PROT:
|
||||
ctx->ref_tag = cpu_to_le32(lba);
|
||||
|
||||
if (!qlt_hba_err_chk_enabled(se_cmd))
|
||||
break;
|
||||
|
||||
/* enable ALL bytes of the ref tag */
|
||||
ctx->ref_tag_mask[0] = 0xff;
|
||||
ctx->ref_tag_mask[1] = 0xff;
|
||||
ctx->ref_tag_mask[2] = 0xff;
|
||||
ctx->ref_tag_mask[3] = 0xff;
|
||||
break;
|
||||
|
||||
/* For Type 3 protection: 16 bit GUARD only */
|
||||
case TARGET_DIF_TYPE3_PROT:
|
||||
ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
|
||||
ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
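For the surrounding target-mode DIF hunks (qlt_set_t10dif_tags above, qlt_handle_dif_error further below), each protected block carries an 8-byte T10 PI tuple: a 16-bit guard, a 16-bit application tag and a 32-bit reference tag, stored big-endian, with the Type 1 reference tag seeded from the low 32 bits of the LBA. A hedged, self-contained sketch of that layout and of the byte-order conversions the driver performs with be16_to_cpu()/be32_to_cpu() (the helper names here are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* 8-byte T10 PI tuple as it appears on the wire (big-endian fields). */
struct pi_tuple {
	uint8_t b[8];	/* [0..1] guard, [2..3] app tag, [4..7] ref tag */
};

static uint16_t get_be16(const uint8_t *p) { return (uint16_t)(p[0] << 8 | p[1]); }
static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

int main(void)
{
	/* Type 1 style: ref tag = low 32 bits of the LBA of the first block. */
	uint64_t lba = 0x12345678abcdULL;
	uint32_t ref_seed = (uint32_t)lba;

	struct pi_tuple t = { { 0xab, 0xcd,            /* guard */
				0x00, 0x00,            /* app tag */
				0x56, 0x78, 0xab, 0xcd /* ref tag */ } };

	printf("guard=0x%04x app=0x%04x ref=0x%08x (seed 0x%08x)\n",
	       get_be16(&t.b[0]), get_be16(&t.b[2]), get_be32(&t.b[4]), ref_seed);
	return 0;
}
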
static inline int
|
||||
qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
|
||||
{
|
||||
uint32_t *cur_dsd;
|
||||
int sgc;
|
||||
uint32_t transfer_length = 0;
|
||||
uint32_t data_bytes;
|
||||
uint32_t dif_bytes;
|
||||
uint8_t bundling = 1;
|
||||
uint8_t *clr_ptr;
|
||||
struct crc_context *crc_ctx_pkt = NULL;
|
||||
struct qla_hw_data *ha;
|
||||
struct ctio_crc2_to_fw *pkt;
|
||||
dma_addr_t crc_ctx_dma;
|
||||
uint16_t fw_prot_opts = 0;
|
||||
struct qla_tgt_cmd *cmd = prm->cmd;
|
||||
struct se_cmd *se_cmd = &cmd->se_cmd;
|
||||
uint32_t h;
|
||||
struct atio_from_isp *atio = &prm->cmd->atio;
|
||||
|
||||
sgc = 0;
|
||||
ha = vha->hw;
|
||||
|
||||
pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
|
||||
prm->pkt = pkt;
|
||||
memset(pkt, 0, sizeof(*pkt));
|
||||
|
||||
ql_dbg(ql_dbg_tgt, vha, 0xe071,
|
||||
"qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
|
||||
vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
|
||||
prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
|
||||
|
||||
if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
|
||||
(se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
|
||||
bundling = 0;
|
||||
|
||||
/* Compute dif len and adjust data len to include protection */
data_bytes = cmd->bufflen;
|
||||
dif_bytes = (data_bytes / cmd->blk_sz) * 8;
|
||||
|
||||
switch (se_cmd->prot_op) {
|
||||
case TARGET_PROT_DIN_INSERT:
|
||||
case TARGET_PROT_DOUT_STRIP:
|
||||
transfer_length = data_bytes;
|
||||
data_bytes += dif_bytes;
|
||||
break;
|
||||
|
||||
case TARGET_PROT_DIN_STRIP:
|
||||
case TARGET_PROT_DOUT_INSERT:
|
||||
case TARGET_PROT_DIN_PASS:
|
||||
case TARGET_PROT_DOUT_PASS:
|
||||
transfer_length = data_bytes + dif_bytes;
|
||||
break;
|
||||
|
||||
default:
|
||||
BUG();
|
||||
break;
|
||||
}
|
||||
|
||||
if (!qlt_hba_err_chk_enabled(se_cmd))
|
||||
fw_prot_opts |= 0x10; /* Disable Guard tag checking */
|
||||
/* HBA error checking enabled */
|
||||
else if (IS_PI_UNINIT_CAPABLE(ha)) {
|
||||
if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
|
||||
(se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
|
||||
fw_prot_opts |= PO_DIS_VALD_APP_ESC;
|
||||
else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
|
||||
fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
|
||||
}
|
||||
|
||||
switch (se_cmd->prot_op) {
|
||||
case TARGET_PROT_DIN_INSERT:
|
||||
case TARGET_PROT_DOUT_INSERT:
|
||||
fw_prot_opts |= PO_MODE_DIF_INSERT;
|
||||
break;
|
||||
case TARGET_PROT_DIN_STRIP:
|
||||
case TARGET_PROT_DOUT_STRIP:
|
||||
fw_prot_opts |= PO_MODE_DIF_REMOVE;
|
||||
break;
|
||||
case TARGET_PROT_DIN_PASS:
|
||||
case TARGET_PROT_DOUT_PASS:
|
||||
fw_prot_opts |= PO_MODE_DIF_PASS;
|
||||
/* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
|
||||
break;
|
||||
default:/* Normal Request */
|
||||
fw_prot_opts |= PO_MODE_DIF_PASS;
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
/* ---- PKT ---- */
|
||||
/* Update entry type to indicate Command Type CRC_2 IOCB */
|
||||
pkt->entry_type = CTIO_CRC2;
|
||||
pkt->entry_count = 1;
|
||||
pkt->vp_index = vha->vp_idx;
|
||||
|
||||
h = qlt_make_handle(vha);
|
||||
if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
|
||||
/*
|
||||
* CTIO type 7 from the firmware doesn't provide a way to
|
||||
* know the initiator's LOOP ID, hence we can't find
|
||||
* the session and, so, the command.
|
||||
*/
|
||||
return -EAGAIN;
|
||||
} else
|
||||
ha->tgt.cmds[h-1] = prm->cmd;
|
||||
|
||||
|
||||
pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
|
||||
pkt->nport_handle = prm->cmd->loop_id;
|
||||
pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
|
||||
pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
|
||||
pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
|
||||
pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
|
||||
pkt->exchange_addr = atio->u.isp24.exchange_addr;
|
||||
pkt->ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
|
||||
pkt->flags |= (atio->u.isp24.attr << 9);
|
||||
pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
|
||||
|
||||
/* Set transfer direction */
|
||||
if (cmd->dma_data_direction == DMA_TO_DEVICE)
|
||||
pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN);
|
||||
else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
|
||||
pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
|
||||
|
||||
|
||||
pkt->dseg_count = prm->tot_dsds;
|
||||
/* Fibre channel byte count */
|
||||
pkt->transfer_length = cpu_to_le32(transfer_length);
|
||||
|
||||
|
||||
/* ----- CRC context -------- */
|
||||
|
||||
/* Allocate CRC context from global pool */
|
||||
crc_ctx_pkt = cmd->ctx =
|
||||
dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
|
||||
|
||||
if (!crc_ctx_pkt)
|
||||
goto crc_queuing_error;
|
||||
|
||||
/* Zero out CTX area. */
|
||||
clr_ptr = (uint8_t *)crc_ctx_pkt;
|
||||
memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
|
||||
|
||||
crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
|
||||
INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
|
||||
|
||||
/* Set handle */
|
||||
crc_ctx_pkt->handle = pkt->handle;
|
||||
|
||||
qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);
|
||||
|
||||
pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
|
||||
pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
|
||||
pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
|
||||
|
||||
|
||||
if (!bundling) {
|
||||
cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
|
||||
} else {
|
||||
/*
|
||||
* Configure Bundling if we need to fetch interlaving
|
||||
* protection PCI accesses
|
||||
*/
|
||||
fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
|
||||
crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
|
||||
crc_ctx_pkt->u.bundling.dseg_count =
|
||||
cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
|
||||
cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
|
||||
}
|
||||
|
||||
/* Finish the common fields of CRC pkt */
|
||||
crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
|
||||
crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
|
||||
crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
|
||||
crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
|
||||
|
||||
|
||||
/* Walks data segments */
|
||||
pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
|
||||
|
||||
if (!bundling && prm->prot_seg_cnt) {
|
||||
if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
|
||||
prm->tot_dsds, cmd))
|
||||
goto crc_queuing_error;
|
||||
} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
|
||||
(prm->tot_dsds - prm->prot_seg_cnt), cmd))
|
||||
goto crc_queuing_error;
|
||||
|
||||
if (bundling && prm->prot_seg_cnt) {
|
||||
/* Walks dif segments */
|
||||
pkt->add_flags |=
|
||||
__constant_cpu_to_le16(CTIO_CRC2_AF_DIF_DSD_ENA);
|
||||
|
||||
cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
|
||||
if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
|
||||
prm->prot_seg_cnt, cmd))
|
||||
goto crc_queuing_error;
|
||||
}
|
||||
return QLA_SUCCESS;
|
||||
|
||||
crc_queuing_error:
|
||||
/* Cleanup will be performed by the caller */
|
||||
|
||||
return QLA_FUNCTION_FAILED;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
|
||||
* QLA_TGT_XMIT_STATUS for >= 24xx silicon
|
||||
@ -1921,9 +2287,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
|
||||
qlt_check_srr_debug(cmd, &xmit_type);
|
||||
|
||||
ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
|
||||
"is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
|
||||
"cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ?
|
||||
1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction);
|
||||
"is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
|
||||
(xmit_type & QLA_TGT_XMIT_STATUS) ?
|
||||
1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
|
||||
&cmd->se_cmd);
|
||||
|
||||
res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
|
||||
&full_req_cnt);
|
||||
@ -1941,7 +2308,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
|
||||
if (unlikely(res))
|
||||
goto out_unmap_unlock;
|
||||
|
||||
res = qlt_24xx_build_ctio_pkt(&prm, vha);
|
||||
if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
|
||||
res = qlt_build_ctio_crc2_pkt(&prm, vha);
|
||||
else
|
||||
res = qlt_24xx_build_ctio_pkt(&prm, vha);
|
||||
if (unlikely(res != 0))
|
||||
goto out_unmap_unlock;
|
||||
|
||||
@ -1953,7 +2323,8 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
|
||||
__constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
|
||||
CTIO7_FLAGS_STATUS_MODE_0);
|
||||
|
||||
qlt_load_data_segments(&prm, vha);
|
||||
if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
|
||||
qlt_load_data_segments(&prm, vha);
|
||||
|
||||
if (prm.add_status_pkt == 0) {
|
||||
if (xmit_type & QLA_TGT_XMIT_STATUS) {
|
||||
@ -1983,8 +2354,14 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
|
||||
ql_dbg(ql_dbg_tgt, vha, 0xe019,
|
||||
"Building additional status packet\n");
|
||||
|
||||
/*
|
||||
* T10Dif: ctio_crc2_to_fw overlay ontop of
|
||||
* ctio7_to_24xx
|
||||
*/
|
||||
memcpy(ctio, pkt, sizeof(*ctio));
|
||||
/* reset back to CTIO7 */
|
||||
ctio->entry_count = 1;
|
||||
ctio->entry_type = CTIO_TYPE7;
|
||||
ctio->dseg_count = 0;
|
||||
ctio->u.status1.flags &= ~__constant_cpu_to_le16(
|
||||
CTIO7_FLAGS_DATA_IN);
|
||||
@ -1993,6 +2370,11 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
|
||||
pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
|
||||
pkt->u.status0.flags |= __constant_cpu_to_le16(
|
||||
CTIO7_FLAGS_DONT_RET_CTIO);
|
||||
|
||||
/* qlt_24xx_init_ctio_to_isp will correct
* all necessary fields that are part of CTIO7.
* There should be no residual of CTIO-CRC2 data.
*/
qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
|
||||
&prm);
|
||||
pr_debug("Status CTIO7: %p\n", ctio);
|
||||
@ -2041,8 +2423,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
|
||||
if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
|
||||
return -EIO;
|
||||
|
||||
ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)",
|
||||
(int)vha->vp_idx);
|
||||
ql_dbg(ql_dbg_tgt, vha, 0xe01b,
|
||||
"%s: CTIO_start: vha(%d) se_cmd %p ox_id %04x\n",
|
||||
__func__, (int)vha->vp_idx, &cmd->se_cmd,
|
||||
be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
|
||||
|
||||
/* Calculate number of entries and segments required */
|
||||
if (qlt_pci_map_calc_cnt(&prm) != 0)
|
||||
@ -2054,14 +2438,19 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
|
||||
res = qlt_check_reserve_free_req(vha, prm.req_cnt);
|
||||
if (res != 0)
|
||||
goto out_unlock_free_unmap;
|
||||
if (cmd->se_cmd.prot_op)
|
||||
res = qlt_build_ctio_crc2_pkt(&prm, vha);
|
||||
else
|
||||
res = qlt_24xx_build_ctio_pkt(&prm, vha);
|
||||
|
||||
res = qlt_24xx_build_ctio_pkt(&prm, vha);
|
||||
if (unlikely(res != 0))
|
||||
goto out_unlock_free_unmap;
|
||||
pkt = (struct ctio7_to_24xx *)prm.pkt;
|
||||
pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
|
||||
CTIO7_FLAGS_STATUS_MODE_0);
|
||||
qlt_load_data_segments(&prm, vha);
|
||||
|
||||
if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
|
||||
qlt_load_data_segments(&prm, vha);
|
||||
|
||||
cmd->state = QLA_TGT_STATE_NEED_DATA;
|
||||
|
||||
@ -2079,6 +2468,143 @@ out_unlock_free_unmap:
|
||||
}
|
||||
EXPORT_SYMBOL(qlt_rdy_to_xfer);
|
||||
|
||||
|
||||
/*
|
||||
* Checks the guard or meta-data for the type of error
|
||||
* detected by the HBA.
|
||||
*/
|
||||
static inline int
|
||||
qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
|
||||
struct ctio_crc_from_fw *sts)
|
||||
{
|
||||
uint8_t *ap = &sts->actual_dif[0];
|
||||
uint8_t *ep = &sts->expected_dif[0];
|
||||
uint32_t e_ref_tag, a_ref_tag;
|
||||
uint16_t e_app_tag, a_app_tag;
|
||||
uint16_t e_guard, a_guard;
|
||||
uint64_t lba = cmd->se_cmd.t_task_lba;
|
||||
|
||||
a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
|
||||
a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
|
||||
a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
|
||||
|
||||
e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
|
||||
e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
|
||||
e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
|
||||
|
||||
ql_dbg(ql_dbg_tgt, vha, 0xe075,
|
||||
"iocb(s) %p Returned STATUS.\n", sts);
|
||||
|
||||
ql_dbg(ql_dbg_tgt, vha, 0xf075,
|
||||
"dif check TGT cdb 0x%x lba 0x%llu: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
|
||||
cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
|
||||
a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
|
||||
|
||||
/*
|
||||
* Ignore sector if:
|
||||
* For type 3: ref & app tag is all 'f's
|
||||
* For type 0,1,2: app tag is all 'f's
|
||||
*/
|
||||
if ((a_app_tag == 0xffff) &&
|
||||
((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
|
||||
(a_ref_tag == 0xffffffff))) {
|
||||
uint32_t blocks_done;
|
||||
|
||||
/* 2TB boundary case covered automatically with this */
|
||||
blocks_done = e_ref_tag - (uint32_t)lba + 1;
|
||||
cmd->se_cmd.bad_sector = e_ref_tag;
|
||||
cmd->se_cmd.pi_err = 0;
|
||||
ql_dbg(ql_dbg_tgt, vha, 0xf074,
|
||||
"need to return scsi good\n");
|
||||
|
||||
/* Update protection tag */
|
||||
if (cmd->prot_sg_cnt) {
|
||||
uint32_t i, j = 0, k = 0, num_ent;
|
||||
struct scatterlist *sg, *sgl;
|
||||
|
||||
|
||||
sgl = cmd->prot_sg;
|
||||
|
||||
/* Patch the corresponding protection tags */
|
||||
for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
|
||||
num_ent = sg_dma_len(sg) / 8;
|
||||
if (k + num_ent < blocks_done) {
|
||||
k += num_ent;
|
||||
continue;
|
||||
}
|
||||
j = blocks_done - k - 1;
|
||||
k = blocks_done;
|
||||
break;
|
||||
}
|
||||
|
||||
if (k != blocks_done) {
|
||||
ql_log(ql_log_warn, vha, 0xf076,
|
||||
"unexpected tag values tag:lba=%u:%llu)\n",
|
||||
e_ref_tag, (unsigned long long)lba);
|
||||
goto out;
|
||||
}
|
||||
|
||||
#if 0
|
||||
struct sd_dif_tuple *spt;
|
||||
/* TODO:
|
||||
* This section came from initiator. Is it valid here?
|
||||
* should ulp be override with actual val???
|
||||
*/
|
||||
spt = page_address(sg_page(sg)) + sg->offset;
|
||||
spt += j;
|
||||
|
||||
spt->app_tag = 0xffff;
|
||||
if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
|
||||
spt->ref_tag = 0xffffffff;
|
||||
#endif
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* check guard */
|
||||
if (e_guard != a_guard) {
|
||||
cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
|
||||
cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
|
||||
|
||||
ql_log(ql_log_warn, vha, 0xe076,
|
||||
"Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
|
||||
cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
|
||||
a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
|
||||
a_guard, e_guard, cmd);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* check ref tag */
|
||||
if (e_ref_tag != a_ref_tag) {
|
||||
cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
|
||||
cmd->se_cmd.bad_sector = e_ref_tag;
|
||||
|
||||
ql_log(ql_log_warn, vha, 0xe077,
|
||||
"Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
|
||||
cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
|
||||
a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
|
||||
a_guard, e_guard, cmd);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* check appl tag */
|
||||
if (e_app_tag != a_app_tag) {
|
||||
cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
|
||||
cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
|
||||
|
||||
ql_log(ql_log_warn, vha, 0xe078,
|
||||
"App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
|
||||
cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
|
||||
a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
|
||||
a_guard, e_guard, cmd);
|
||||
goto out;
|
||||
}
|
||||
out:
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
/* If hardware_lock held on entry, might drop it, then reacquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
|
||||
static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
|
||||
@ -2155,18 +2681,36 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
|
||||
rc = __qlt_send_term_exchange(vha, cmd, atio);
|
||||
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
|
||||
done:
|
||||
if (rc == 1) {
|
||||
/*
* Terminate exchange will tell fw to release any active CTIO
* that's in FW possession and cleanup the exchange.
*
* "cmd->state == QLA_TGT_STATE_ABORTED" means CTIO is still
* down at FW. Free the cmd later when CTIO comes back later
* w/aborted(0x2) status.
*
* "cmd->state != QLA_TGT_STATE_ABORTED" means CTIO is already
* back w/some err. Free the cmd now.
*/
if ((rc == 1) && (cmd->state != QLA_TGT_STATE_ABORTED)) {
|
||||
if (!ha_locked && !in_interrupt())
|
||||
msleep(250); /* just in case */
|
||||
|
||||
if (cmd->sg_mapped)
|
||||
qlt_unmap_sg(vha, cmd);
|
||||
vha->hw->tgt.tgt_ops->free_cmd(cmd);
|
||||
}
|
||||
return;
|
||||
}

void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
BUG_ON(cmd->sg_mapped);
ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
"%s: se_cmd[%p] ox_id %04x\n",
__func__, &cmd->se_cmd,
be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));

BUG_ON(cmd->sg_mapped);
if (unlikely(cmd->free_sg))
kfree(cmd->sg);
kmem_cache_free(qla_tgt_cmd_cachep, cmd);
@@ -2374,6 +2918,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
case CTIO_LIP_RESET:
case CTIO_TARGET_RESET:
case CTIO_ABORTED:
/* driver request abort via Terminate exchange */
case CTIO_TIMEOUT:
case CTIO_INVALID_RX_ID:
/* They are OK */
@@ -2404,18 +2949,58 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
else
return;

case CTIO_DIF_ERROR: {
struct ctio_crc_from_fw *crc =
(struct ctio_crc_from_fw *)ctio;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
"qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
vha->vp_idx, status, cmd->state, se_cmd,
*((u64 *)&crc->actual_dif[0]),
*((u64 *)&crc->expected_dif[0]));

if (qlt_handle_dif_error(vha, cmd, ctio)) {
if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
/* scsi Write/xfer rdy complete */
goto skip_term;
} else {
/* scsi read/xmit respond complete
* call handle dif to send scsi status
* rather than terminate exchange.
*/
cmd->state = QLA_TGT_STATE_PROCESSED;
ha->tgt.tgt_ops->handle_dif_err(cmd);
return;
}
} else {
/* Need to generate a SCSI good completion.
* because FW did not send scsi status.
*/
status = 0;
goto skip_term;
}
break;
}
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
"qla_target(%d): CTIO with error status "
"0x%x received (state %x, se_cmd %p\n",
"qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
vha->vp_idx, status, cmd->state, se_cmd);
break;
}

if (cmd->state != QLA_TGT_STATE_NEED_DATA)

/* "cmd->state == QLA_TGT_STATE_ABORTED" means
* cmd is already aborted/terminated, we don't
* need to terminate again. The exchange is already
* cleaned up/freed at FW level. Just cleanup at driver
* level.
*/
if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
(cmd->state != QLA_TGT_STATE_ABORTED)) {
if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
return;
}
}
skip_term:

if (cmd->state == QLA_TGT_STATE_PROCESSED) {
ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
@@ -2444,7 +3029,8 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
"not return a CTIO complete\n", vha->vp_idx, cmd->state);
}

if (unlikely(status != CTIO_SUCCESS)) {
if (unlikely(status != CTIO_SUCCESS) &&
(cmd->state != QLA_TGT_STATE_ABORTED)) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
dump_stack();
}
@@ -2563,8 +3149,9 @@ static void qlt_do_work(struct work_struct *work)
atio->u.isp24.fcp_cmnd.add_cdb_len]));

ql_dbg(ql_dbg_tgt, vha, 0xe022,
"qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
cmd, cmd->unpacked_lun, cmd->tag);
"qla_target: START qla cmd: %p se_cmd %p lun: 0x%04x (tag %d) len(%d) ox_id %x\n",
cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length,
cmd->atio.u.isp24.fcp_hdr.ox_id);

ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
fcp_task_attr, data_dir, bidi);
@@ -3527,11 +4114,11 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
switch (atio->u.raw.entry_type) {
case ATIO_TYPE7:
ql_dbg(ql_dbg_tgt, vha, 0xe02d,
"ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, "
"add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n",
"ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, cdb %x, add_cdb_len %x, data_length %04x, s_id %02x%02x%02x\n",
vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
atio->u.isp24.fcp_cmnd.rddata,
atio->u.isp24.fcp_cmnd.wrdata,
atio->u.isp24.fcp_cmnd.cdb[0],
atio->u.isp24.fcp_cmnd.add_cdb_len,
be32_to_cpu(get_unaligned((uint32_t *)
&atio->u.isp24.fcp_cmnd.add_cdb[
@@ -3629,11 +4216,13 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
tgt->irq_cmd_count++;

switch (pkt->entry_type) {
case CTIO_CRC2:
case CTIO_TYPE7:
{
struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n",
vha->vp_idx);
ql_dbg(ql_dbg_tgt, vha, 0xe030,
"CTIO[0x%x] 12/CTIO7 7A/CRC2: instance %d\n",
entry->entry_type, vha->vp_idx);
qlt_do_ctio_completion(vha, entry->handle,
le16_to_cpu(entry->status)|(pkt->entry_status << 16),
entry);
@@ -4768,6 +5357,7 @@ qlt_24xx_process_response_error(struct scsi_qla_host *vha,
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case NOTIFY_ACK_TYPE:
case CTIO_CRC2:
return 1;
default:
return 0;

@@ -293,6 +293,7 @@ struct ctio_to_2xxx {
#define CTIO_ABORTED 0x02
#define CTIO_INVALID_RX_ID 0x08
#define CTIO_TIMEOUT 0x0B
#define CTIO_DIF_ERROR 0x0C /* DIF error detected */
#define CTIO_LIP_RESET 0x0E
#define CTIO_TARGET_RESET 0x17
#define CTIO_PORT_UNAVAILABLE 0x28
@@ -498,11 +499,12 @@ struct ctio7_from_24xx {
#define CTIO7_FLAGS_DONT_RET_CTIO BIT_8
#define CTIO7_FLAGS_STATUS_MODE_0 0
#define CTIO7_FLAGS_STATUS_MODE_1 BIT_6
#define CTIO7_FLAGS_STATUS_MODE_2 BIT_7
#define CTIO7_FLAGS_EXPLICIT_CONFORM BIT_5
#define CTIO7_FLAGS_CONFIRM_SATISF BIT_4
#define CTIO7_FLAGS_DSD_PTR BIT_2
#define CTIO7_FLAGS_DATA_IN BIT_1
#define CTIO7_FLAGS_DATA_OUT BIT_0
#define CTIO7_FLAGS_DATA_IN BIT_1 /* data to initiator */
#define CTIO7_FLAGS_DATA_OUT BIT_0 /* data from initiator */

#define ELS_PLOGI 0x3
#define ELS_FLOGI 0x4
@@ -513,6 +515,68 @@ struct ctio7_from_24xx {
#define ELS_PDISC 0x50
#define ELS_ADISC 0x52

/*
*CTIO Type CRC_2 IOCB
*/
struct ctio_crc2_to_fw {
uint8_t entry_type; /* Entry type. */
#define CTIO_CRC2 0x7A
uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */

uint32_t handle; /* System handle. */
uint16_t nport_handle; /* N_PORT handle. */
uint16_t timeout; /* Command timeout. */

uint16_t dseg_count; /* Data segment count. */
uint8_t vp_index;
uint8_t add_flags; /* additional flags */
#define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3

uint8_t initiator_id[3]; /* initiator ID */
uint8_t reserved1;
uint32_t exchange_addr; /* rcv exchange address */
uint16_t reserved2;
uint16_t flags; /* refer to CTIO7 flags values */
uint32_t residual;
uint16_t ox_id;
uint16_t scsi_status;
uint32_t relative_offset;
uint32_t reserved5;
uint32_t transfer_length; /* total fc transfer length */
uint32_t reserved6;
uint32_t crc_context_address[2];/* Data segment address. */
uint16_t crc_context_len; /* Data segment length. */
uint16_t reserved_1; /* MUST be set to 0. */
} __packed;

/* CTIO Type CRC_x Status IOCB */
struct ctio_crc_from_fw {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */

uint32_t handle; /* System handle. */
uint16_t status;
uint16_t timeout; /* Command timeout. */
uint16_t dseg_count; /* Data segment count. */
uint32_t reserved1;
uint16_t state_flags;
#define CTIO_CRC_SF_DIF_CHOPPED BIT_4

uint32_t exchange_address; /* rcv exchange address */
uint16_t reserved2;
uint16_t flags;
uint32_t resid_xfer_length;
uint16_t ox_id;
uint8_t reserved3[12];
uint16_t runt_guard; /* reported runt blk guard */
uint8_t actual_dif[8];
uint8_t expected_dif[8];
} __packed;

/*
* ISP queue - ABTS received/response entries structure definition for 24xx.
*/
@@ -641,6 +705,7 @@ struct qla_tgt_func_tmpl {
int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
unsigned char *, uint32_t, int, int, int);
void (*handle_data)(struct qla_tgt_cmd *);
void (*handle_dif_err)(struct qla_tgt_cmd *);
int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
uint32_t);
void (*free_cmd)(struct qla_tgt_cmd *);
@@ -829,9 +894,9 @@ struct qla_tgt_sess {
};

struct qla_tgt_cmd {
struct se_cmd se_cmd;
struct qla_tgt_sess *sess;
int state;
struct se_cmd se_cmd;
struct work_struct free_work;
struct work_struct work;
/* Sense buffer that will be mapped into outgoing status */
@@ -843,6 +908,7 @@ struct qla_tgt_cmd {
unsigned int free_sg:1;
unsigned int aborted:1; /* Needed in case of SRR */
unsigned int write_data_transferred:1;
unsigned int ctx_dsd_alloced:1;

struct scatterlist *sg; /* cmd data buffer SG vector */
int sg_cnt; /* SG segments count */
@@ -857,6 +923,12 @@ struct qla_tgt_cmd {
struct scsi_qla_host *vha;

struct atio_from_isp atio;
/* t10dif */
struct scatterlist *prot_sg;
uint32_t prot_sg_cnt;
uint32_t blk_sz;
struct crc_context *ctx;

};

struct qla_tgt_sess_work_param {
@@ -901,6 +973,10 @@ struct qla_tgt_prm {
int sense_buffer_len;
int residual;
int add_status_pkt;
/* dif */
struct scatterlist *prot_sg;
uint16_t prot_seg_cnt;
uint16_t tot_dsds;
};

struct qla_tgt_srr_imm {
@@ -976,6 +1052,8 @@ extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *,
extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
extern int qlt_rdy_to_xfer_dif(struct qla_tgt_cmd *);
extern int qlt_xmit_response_dif(struct qla_tgt_cmd *, int, uint8_t);
extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
@ -201,7 +201,6 @@ qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
|
||||
ql_dbg(ql_dbg_misc, NULL, 0xd014,
|
||||
"%s: @%x\n", __func__, offset);
|
||||
}
|
||||
qla27xx_insert32(offset, buf, len);
|
||||
qla27xx_read32(window, buf, len);
|
||||
}
|
||||
|
||||
@ -220,7 +219,7 @@ qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
|
||||
|
||||
static inline void
|
||||
qla27xx_read_window(__iomem struct device_reg_24xx *reg,
|
||||
uint32_t base, uint offset, uint count, uint width, void *buf,
|
||||
uint32_t addr, uint offset, uint count, uint width, void *buf,
|
||||
ulong *len)
|
||||
{
|
||||
void *window = (void *)reg + offset;
|
||||
@ -229,14 +228,14 @@ qla27xx_read_window(__iomem struct device_reg_24xx *reg,
|
||||
if (buf) {
|
||||
ql_dbg(ql_dbg_misc, NULL, 0xd016,
|
||||
"%s: base=%x offset=%x count=%x width=%x\n",
|
||||
__func__, base, offset, count, width);
|
||||
__func__, addr, offset, count, width);
|
||||
}
|
||||
qla27xx_write_reg(reg, IOBASE_ADDR, base, buf);
|
||||
qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
|
||||
while (count--) {
|
||||
qla27xx_insert32(base, buf, len);
|
||||
qla27xx_insert32(addr, buf, len);
|
||||
readn(window, buf, len);
|
||||
window += width;
|
||||
base += width;
|
||||
addr++;
|
||||
}
|
||||
}
|
||||
|
||||
@ -336,7 +335,8 @@ qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
|
||||
|
||||
ql_dbg(ql_dbg_misc, vha, 0xd204,
|
||||
"%s: rdpci [%lx]\n", __func__, *len);
|
||||
qla27xx_read_reg(reg, ent->t260.pci_addr, buf, len);
|
||||
qla27xx_insert32(ent->t260.pci_offset, buf, len);
|
||||
qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len);
|
||||
|
||||
return false;
|
||||
}
|
||||
@ -349,7 +349,7 @@ qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
|
||||
|
||||
ql_dbg(ql_dbg_misc, vha, 0xd205,
|
||||
"%s: wrpci [%lx]\n", __func__, *len);
|
||||
qla27xx_write_reg(reg, ent->t261.pci_addr, ent->t261.write_data, buf);
|
||||
qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf);
|
||||
|
||||
return false;
|
||||
}
|
||||
@ -392,9 +392,9 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (end < start) {
|
||||
if (end < start || end == 0) {
|
||||
ql_dbg(ql_dbg_misc, vha, 0xd023,
|
||||
"%s: bad range (start=%x end=%x)\n", __func__,
|
||||
"%s: unusable range (start=%x end=%x)\n", __func__,
|
||||
ent->t262.end_addr, ent->t262.start_addr);
|
||||
qla27xx_skip_entry(ent, buf);
|
||||
goto done;
|
||||
@ -452,17 +452,15 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
|
||||
ql_dbg(ql_dbg_misc, vha, 0xd025,
|
||||
"%s: unsupported atio queue\n", __func__);
|
||||
qla27xx_skip_entry(ent, buf);
|
||||
goto done;
|
||||
} else {
|
||||
ql_dbg(ql_dbg_misc, vha, 0xd026,
|
||||
"%s: unknown queue %u\n", __func__, ent->t263.queue_type);
|
||||
qla27xx_skip_entry(ent, buf);
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (buf)
|
||||
ent->t263.num_queues = count;
|
||||
done:
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -503,7 +501,7 @@ qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
|
||||
ql_dbg(ql_dbg_misc, vha, 0xd209,
|
||||
"%s: pause risc [%lx]\n", __func__, *len);
|
||||
if (buf)
|
||||
qla24xx_pause_risc(reg);
|
||||
qla24xx_pause_risc(reg, vha->hw);
|
||||
|
||||
return false;
|
||||
}
|
||||
@ -590,7 +588,6 @@ qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
|
||||
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
|
||||
{
|
||||
struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
|
||||
void *window = (void *)reg + 0xc4;
|
||||
ulong dwords = ent->t270.count;
|
||||
ulong addr = ent->t270.addr;
|
||||
|
||||
@ -599,10 +596,9 @@ qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
|
||||
qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
|
||||
while (dwords--) {
|
||||
qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf);
|
||||
qla27xx_read_reg(reg, 0xc4, buf, len);
|
||||
qla27xx_insert32(addr, buf, len);
|
||||
qla27xx_read32(window, buf, len);
|
||||
addr++;
|
||||
qla27xx_read_reg(reg, 0xc4, buf, len);
|
||||
addr += sizeof(uint32_t);
|
||||
}
|
||||
|
||||
return false;
|
||||
@ -614,12 +610,12 @@ qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
|
||||
{
|
||||
struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
|
||||
ulong addr = ent->t271.addr;
|
||||
ulong data = ent->t271.data;
|
||||
|
||||
ql_dbg(ql_dbg_misc, vha, 0xd20f,
|
||||
"%s: wrremreg [%lx]\n", __func__, *len);
|
||||
qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
|
||||
qla27xx_read_reg(reg, 0xc4, buf, len);
|
||||
qla27xx_insert32(addr, buf, len);
|
||||
qla27xx_write_reg(reg, 0xc4, data, buf);
|
||||
qla27xx_write_reg(reg, 0xc0, addr, buf);
|
||||
|
||||
return false;
|
||||
@ -662,12 +658,62 @@ qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
|
||||
"%s: failed pcicfg read at %lx\n", __func__, addr);
|
||||
qla27xx_insert32(addr, buf, len);
|
||||
qla27xx_insert32(value, buf, len);
|
||||
addr += 4;
|
||||
addr += sizeof(uint32_t);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static int
|
||||
qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
|
||||
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
|
||||
{
|
||||
uint count = 0;
|
||||
uint i;
|
||||
|
||||
ql_dbg(ql_dbg_misc, vha, 0xd212,
|
||||
"%s: getqsh(%x) [%lx]\n", __func__, ent->t274.queue_type, *len);
|
||||
if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
|
||||
for (i = 0; i < vha->hw->max_req_queues; i++) {
|
||||
struct req_que *req = vha->hw->req_q_map[i];
|
||||
if (req || !buf) {
|
||||
qla27xx_insert16(i, buf, len);
|
||||
qla27xx_insert16(1, buf, len);
|
||||
qla27xx_insert32(req && req->out_ptr ?
|
||||
*req->out_ptr : 0, buf, len);
|
||||
count++;
|
||||
}
|
||||
}
|
||||
} else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
|
||||
for (i = 0; i < vha->hw->max_rsp_queues; i++) {
|
||||
struct rsp_que *rsp = vha->hw->rsp_q_map[i];
|
||||
if (rsp || !buf) {
|
||||
qla27xx_insert16(i, buf, len);
|
||||
qla27xx_insert16(1, buf, len);
|
||||
qla27xx_insert32(rsp && rsp->in_ptr ?
|
||||
*rsp->in_ptr : 0, buf, len);
|
||||
count++;
|
||||
}
|
||||
}
|
||||
} else if (ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
|
||||
ql_dbg(ql_dbg_misc, vha, 0xd02e,
|
||||
"%s: unsupported atio queue\n", __func__);
|
||||
qla27xx_skip_entry(ent, buf);
|
||||
} else {
|
||||
ql_dbg(ql_dbg_misc, vha, 0xd02f,
|
||||
"%s: unknown queue %u\n", __func__, ent->t274.queue_type);
|
||||
qla27xx_skip_entry(ent, buf);
|
||||
}
|
||||
|
||||
if (buf)
|
||||
ent->t274.num_queues = count;
|
||||
|
||||
if (!count)
|
||||
qla27xx_skip_entry(ent, buf);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static int
|
||||
qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
|
||||
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
|
||||
@ -709,6 +755,7 @@ static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = {
|
||||
{ ENTRY_TYPE_WRREMREG , qla27xx_fwdt_entry_t271 } ,
|
||||
{ ENTRY_TYPE_RDREMRAM , qla27xx_fwdt_entry_t272 } ,
|
||||
{ ENTRY_TYPE_PCICFG , qla27xx_fwdt_entry_t273 } ,
|
||||
{ ENTRY_TYPE_GET_SHADOW , qla27xx_fwdt_entry_t274 } ,
|
||||
{ -1 , qla27xx_fwdt_entry_other }
|
||||
};
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic Fibre Channel HBA Driver
|
||||
* Copyright (c) 2003-2013 QLogic Corporation
|
||||
* Copyright (c) 2003-2014 QLogic Corporation
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
@ -52,6 +52,7 @@ struct __packed qla27xx_fwdt_template {
|
||||
#define ENTRY_TYPE_WRREMREG 271
|
||||
#define ENTRY_TYPE_RDREMRAM 272
|
||||
#define ENTRY_TYPE_PCICFG 273
|
||||
#define ENTRY_TYPE_GET_SHADOW 274
|
||||
|
||||
#define CAPTURE_FLAG_PHYS_ONLY BIT_0
|
||||
#define CAPTURE_FLAG_PHYS_VIRT BIT_1
|
||||
@ -109,12 +110,12 @@ struct __packed qla27xx_fwdt_entry {
|
||||
} t259;
|
||||
|
||||
struct __packed {
|
||||
uint8_t pci_addr;
|
||||
uint8_t pci_offset;
|
||||
uint8_t reserved[3];
|
||||
} t260;
|
||||
|
||||
struct __packed {
|
||||
uint8_t pci_addr;
|
||||
uint8_t pci_offset;
|
||||
uint8_t reserved[3];
|
||||
uint32_t write_data;
|
||||
} t261;
|
||||
@ -186,6 +187,12 @@ struct __packed qla27xx_fwdt_entry {
|
||||
uint32_t addr;
|
||||
uint32_t count;
|
||||
} t273;
|
||||
|
||||
struct __packed {
|
||||
uint32_t num_queues;
|
||||
uint8_t queue_type;
|
||||
uint8_t reserved[3];
|
||||
} t274;
|
||||
};
|
||||
};
|
||||
|
||||
@ -202,4 +209,8 @@ struct __packed qla27xx_fwdt_entry {
|
||||
#define T268_BUF_TYPE_EXCH_BUFOFF 2
|
||||
#define T268_BUF_TYPE_EXTD_LOGIN 3
|
||||
|
||||
#define T274_QUEUE_TYPE_REQ_SHAD 1
|
||||
#define T274_QUEUE_TYPE_RSP_SHAD 2
|
||||
#define T274_QUEUE_TYPE_ATIO_SHAD 3
|
||||
|
||||
#endif
|
||||

@@ -1,13 +1,13 @@
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
* Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
/*
* Driver version
*/
#define QLA2XXX_VERSION "8.07.00.02-k"
#define QLA2XXX_VERSION "8.07.00.08-k"

#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 7
|
@ -472,6 +472,11 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
|
||||
cmd->sg_cnt = se_cmd->t_data_nents;
|
||||
cmd->sg = se_cmd->t_data_sg;
|
||||
|
||||
cmd->prot_sg_cnt = se_cmd->t_prot_nents;
|
||||
cmd->prot_sg = se_cmd->t_prot_sg;
|
||||
cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size;
|
||||
se_cmd->pi_err = 0;
|
||||
|
||||
/*
|
||||
* qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
|
||||
* the SGL mappings into PCIe memory for incoming FCP WRITE data.
|
||||
@ -567,8 +572,13 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
|
||||
return;
|
||||
}
|
||||
|
||||
transport_generic_request_failure(&cmd->se_cmd,
|
||||
TCM_CHECK_CONDITION_ABORT_CMD);
|
||||
if (cmd->se_cmd.pi_err)
|
||||
transport_generic_request_failure(&cmd->se_cmd,
|
||||
cmd->se_cmd.pi_err);
|
||||
else
|
||||
transport_generic_request_failure(&cmd->se_cmd,
|
||||
TCM_CHECK_CONDITION_ABORT_CMD);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
@ -584,6 +594,27 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
|
||||
queue_work(tcm_qla2xxx_free_wq, &cmd->work);
|
||||
}
|
||||
|
||||
static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
|
||||
{
|
||||
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);

/* take an extra kref to prevent cmd free too early.
* need to wait for SCSI status/check condition to
* finish responding generated by transport_generic_request_failure.
*/
kref_get(&cmd->se_cmd.cmd_kref);
transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err);
}
|
||||
/*
|
||||
* Called from qla_target.c:qlt_do_ctio_completion()
|
||||
*/
|
||||
static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd)
|
||||
{
|
||||
INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work);
|
||||
queue_work(tcm_qla2xxx_free_wq, &cmd->work);
|
||||
}
|
||||
|
||||
/*
|
||||
* Called from qla_target.c:qlt_issue_task_mgmt()
|
||||
*/
|
||||
@ -610,6 +641,11 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
|
||||
cmd->sg = se_cmd->t_data_sg;
|
||||
cmd->offset = 0;
|
||||
|
||||
cmd->prot_sg_cnt = se_cmd->t_prot_nents;
|
||||
cmd->prot_sg = se_cmd->t_prot_sg;
|
||||
cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size;
|
||||
se_cmd->pi_err = 0;
|
||||
|
||||
/*
|
||||
* Now queue completed DATA_IN the qla2xxx LLD and response ring
|
||||
*/
|
||||
@ -1600,6 +1636,7 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
|
||||
static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
|
||||
.handle_cmd = tcm_qla2xxx_handle_cmd,
|
||||
.handle_data = tcm_qla2xxx_handle_data,
|
||||
.handle_dif_err = tcm_qla2xxx_handle_dif_err,
|
||||
.handle_tmr = tcm_qla2xxx_handle_tmr,
|
||||
.free_cmd = tcm_qla2xxx_free_cmd,
|
||||
.free_mcmd = tcm_qla2xxx_free_mcmd,
|
||||
|
@ -249,110 +249,6 @@ void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)
|
||||
qla4_83xx_flash_unlock(ha);
|
||||
}
|
||||
|
||||
/**
|
||||
* qla4_83xx_ms_mem_write_128b - Writes data to MS/off-chip memory
|
||||
* @ha: Pointer to adapter structure
|
||||
* @addr: Flash address to write to
|
||||
* @data: Data to be written
|
||||
* @count: word_count to be written
|
||||
*
|
||||
* Return: On success return QLA_SUCCESS
|
||||
* On error return QLA_ERROR
|
||||
**/
|
||||
int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
|
||||
uint32_t *data, uint32_t count)
|
||||
{
|
||||
int i, j;
|
||||
uint32_t agt_ctrl;
|
||||
unsigned long flags;
|
||||
int ret_val = QLA_SUCCESS;
|
||||
|
||||
/* Only 128-bit aligned access */
|
||||
if (addr & 0xF) {
|
||||
ret_val = QLA_ERROR;
|
||||
goto exit_ms_mem_write;
|
||||
}
|
||||
|
||||
write_lock_irqsave(&ha->hw_lock, flags);
|
||||
|
||||
/* Write address */
|
||||
ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0);
|
||||
if (ret_val == QLA_ERROR) {
|
||||
ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n",
|
||||
__func__);
|
||||
goto exit_ms_mem_write_unlock;
|
||||
}
|
||||
|
||||
for (i = 0; i < count; i++, addr += 16) {
|
||||
if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
|
||||
QLA8XXX_ADDR_QDR_NET_MAX)) ||
|
||||
(QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
|
||||
QLA8XXX_ADDR_DDR_NET_MAX)))) {
|
||||
ret_val = QLA_ERROR;
|
||||
goto exit_ms_mem_write_unlock;
|
||||
}
|
||||
|
||||
ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO,
|
||||
addr);
|
||||
/* Write data */
|
||||
ret_val |= qla4_83xx_wr_reg_indirect(ha,
|
||||
MD_MIU_TEST_AGT_WRDATA_LO,
|
||||
*data++);
|
||||
ret_val |= qla4_83xx_wr_reg_indirect(ha,
|
||||
MD_MIU_TEST_AGT_WRDATA_HI,
|
||||
*data++);
|
||||
ret_val |= qla4_83xx_wr_reg_indirect(ha,
|
||||
MD_MIU_TEST_AGT_WRDATA_ULO,
|
||||
*data++);
|
||||
ret_val |= qla4_83xx_wr_reg_indirect(ha,
|
||||
MD_MIU_TEST_AGT_WRDATA_UHI,
|
||||
*data++);
|
||||
if (ret_val == QLA_ERROR) {
|
||||
ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n",
|
||||
__func__);
|
||||
goto exit_ms_mem_write_unlock;
|
||||
}
|
||||
|
||||
/* Check write status */
|
||||
ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
|
||||
MIU_TA_CTL_WRITE_ENABLE);
|
||||
ret_val |= qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
|
||||
MIU_TA_CTL_WRITE_START);
|
||||
if (ret_val == QLA_ERROR) {
|
||||
ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n",
|
||||
__func__);
|
||||
goto exit_ms_mem_write_unlock;
|
||||
}
|
||||
|
||||
for (j = 0; j < MAX_CTL_CHECK; j++) {
|
||||
ret_val = qla4_83xx_rd_reg_indirect(ha,
|
||||
MD_MIU_TEST_AGT_CTRL,
|
||||
&agt_ctrl);
|
||||
if (ret_val == QLA_ERROR) {
|
||||
ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n",
|
||||
__func__);
|
||||
goto exit_ms_mem_write_unlock;
|
||||
}
|
||||
if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
|
||||
break;
|
||||
}
|
||||
|
||||
/* Status check failed */
|
||||
if (j >= MAX_CTL_CHECK) {
|
||||
printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n",
|
||||
__func__);
|
||||
ret_val = QLA_ERROR;
|
||||
goto exit_ms_mem_write_unlock;
|
||||
}
|
||||
}
|
||||
|
||||
exit_ms_mem_write_unlock:
|
||||
write_unlock_irqrestore(&ha->hw_lock, flags);
|
||||
|
||||
exit_ms_mem_write:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
#define INTENT_TO_RECOVER 0x01
|
||||
#define PROCEED_TO_RECOVER 0x02
|
||||
|
||||
@ -760,7 +656,7 @@ static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha)
|
||||
__func__));
|
||||
|
||||
/* 128 bit/16 byte write to MS memory */
|
||||
ret_val = qla4_83xx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache,
|
||||
ret_val = qla4_8xxx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache,
|
||||
count);
|
||||
if (ret_val == QLA_ERROR) {
|
||||
ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n",
|
||||
|
@ -254,6 +254,50 @@ struct qla83xx_minidump_entry_pollrd {
|
||||
uint32_t rsvd_1;
|
||||
};
|
||||
|
||||
struct qla8044_minidump_entry_rddfe {
|
||||
struct qla8xxx_minidump_entry_hdr h;
|
||||
uint32_t addr_1;
|
||||
uint32_t value;
|
||||
uint8_t stride;
|
||||
uint8_t stride2;
|
||||
uint16_t count;
|
||||
uint32_t poll;
|
||||
uint32_t mask;
|
||||
uint32_t modify_mask;
|
||||
uint32_t data_size;
|
||||
uint32_t rsvd;
|
||||
|
||||
} __packed;
|
||||
|
||||
struct qla8044_minidump_entry_rdmdio {
|
||||
struct qla8xxx_minidump_entry_hdr h;
|
||||
|
||||
uint32_t addr_1;
|
||||
uint32_t addr_2;
|
||||
uint32_t value_1;
|
||||
uint8_t stride_1;
|
||||
uint8_t stride_2;
|
||||
uint16_t count;
|
||||
uint32_t poll;
|
||||
uint32_t mask;
|
||||
uint32_t value_2;
|
||||
uint32_t data_size;
|
||||
|
||||
} __packed;
|
||||
|
||||
struct qla8044_minidump_entry_pollwr {
|
||||
struct qla8xxx_minidump_entry_hdr h;
|
||||
uint32_t addr_1;
|
||||
uint32_t addr_2;
|
||||
uint32_t value_1;
|
||||
uint32_t value_2;
|
||||
uint32_t poll;
|
||||
uint32_t mask;
|
||||
uint32_t data_size;
|
||||
uint32_t rsvd;
|
||||
|
||||
} __packed;
|
||||
|
||||
/* RDMUX2 Entry */
|
||||
struct qla83xx_minidump_entry_rdmux2 {
|
||||
struct qla8xxx_minidump_entry_hdr h;
|
||||
|
@ -601,6 +601,7 @@ struct scsi_qla_host {
|
||||
#define DPC_HA_NEED_QUIESCENT 22 /* 0x00400000 ISP-82xx only*/
|
||||
#define DPC_POST_IDC_ACK 23 /* 0x00800000 */
|
||||
#define DPC_RESTORE_ACB 24 /* 0x01000000 */
|
||||
#define DPC_SYSFS_DDB_EXPORT 25 /* 0x02000000 */
|
||||
|
||||
struct Scsi_Host *host; /* pointer to host data */
|
||||
uint32_t tot_ddbs;
|
||||
|
@ -1415,6 +1415,9 @@ struct ql_iscsi_stats {
|
||||
#define QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN 16
|
||||
#define QLA83XX_SS_OCM_WNDREG_INDEX 3
|
||||
#define QLA83XX_SS_PCI_INDEX 0
|
||||
#define QLA8022_TEMPLATE_CAP_OFFSET 172
|
||||
#define QLA83XX_TEMPLATE_CAP_OFFSET 268
|
||||
#define QLA80XX_TEMPLATE_RESERVED_BITS 16
|
||||
|
||||
struct qla4_8xxx_minidump_template_hdr {
|
||||
uint32_t entry_type;
|
||||
@ -1434,6 +1437,7 @@ struct qla4_8xxx_minidump_template_hdr {
|
||||
uint32_t saved_state_array[QLA8XXX_DBG_STATE_ARRAY_LEN];
|
||||
uint32_t capture_size_array[QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN];
|
||||
uint32_t ocm_window_reg[QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN];
|
||||
uint32_t capabilities[QLA80XX_TEMPLATE_RESERVED_BITS];
|
||||
};
|
||||
|
||||
#endif /* _QLA4X_FW_H */
|
||||
|
@ -274,13 +274,14 @@ int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
|
||||
int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
|
||||
uint32_t acb_type, uint32_t len);
|
||||
int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config);
|
||||
int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha,
|
||||
int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha,
|
||||
uint64_t addr, uint32_t *data, uint32_t count);
|
||||
uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state);
|
||||
int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config);
|
||||
int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config);
|
||||
int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha);
|
||||
int qla4_83xx_is_detached(struct scsi_qla_host *ha);
|
||||
int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha);
|
||||
|
||||
extern int ql4xextended_error_logging;
|
||||
extern int ql4xdontresethba;
|
||||
|
@ -282,6 +282,25 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
|
||||
return ipv4_wait|ipv6_wait;
|
||||
}
|
||||
|
||||
static int qla4_80xx_is_minidump_dma_capable(struct scsi_qla_host *ha,
|
||||
struct qla4_8xxx_minidump_template_hdr *md_hdr)
|
||||
{
|
||||
int offset = (is_qla8022(ha)) ? QLA8022_TEMPLATE_CAP_OFFSET :
|
||||
QLA83XX_TEMPLATE_CAP_OFFSET;
|
||||
int rval = 1;
|
||||
uint32_t *cap_offset;
|
||||
|
||||
cap_offset = (uint32_t *)((char *)md_hdr + offset);
|
||||
|
||||
if (!(le32_to_cpu(*cap_offset) & BIT_0)) {
|
||||
ql4_printk(KERN_INFO, ha, "PEX DMA Not supported %d\n",
|
||||
*cap_offset);
|
||||
rval = 0;
|
||||
}
|
||||
|
||||
return rval;
|
||||
}
|
||||
|
||||
/**
|
||||
* qla4xxx_alloc_fw_dump - Allocate memory for minidump data.
|
||||
* @ha: pointer to host adapter structure.
|
||||
@ -294,6 +313,7 @@ void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)
|
||||
void *md_tmp;
|
||||
dma_addr_t md_tmp_dma;
|
||||
struct qla4_8xxx_minidump_template_hdr *md_hdr;
|
||||
int dma_capable;
|
||||
|
||||
if (ha->fw_dump) {
|
||||
ql4_printk(KERN_WARNING, ha,
|
||||
@ -326,13 +346,19 @@ void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)
|
||||
|
||||
md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp;
|
||||
|
||||
dma_capable = qla4_80xx_is_minidump_dma_capable(ha, md_hdr);
|
||||
|
||||
capture_debug_level = md_hdr->capture_debug_level;
|
||||
|
||||
/* Get capture mask based on module loadtime setting. */
|
||||
if (ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F)
|
||||
if ((ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F) ||
|
||||
(ql4xmdcapmask == 0xFF && dma_capable)) {
|
||||
ha->fw_dump_capture_mask = ql4xmdcapmask;
|
||||
else
|
||||
} else {
|
||||
if (ql4xmdcapmask == 0xFF)
|
||||
ql4_printk(KERN_INFO, ha, "Falling back to default capture mask, as PEX DMA is not supported\n");
|
||||
ha->fw_dump_capture_mask = capture_debug_level;
|
||||
}
|
||||
|
||||
md_hdr->driver_capture_mask = ha->fw_dump_capture_mask;
|
||||
|
||||
@ -864,6 +890,8 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
|
||||
if (status == QLA_SUCCESS) {
|
||||
if (test_and_clear_bit(AF_GET_CRASH_RECORD, &ha->flags))
|
||||
qla4xxx_get_crash_record(ha);
|
||||
|
||||
qla4xxx_init_rings(ha);
|
||||
} else {
|
||||
DEBUG(printk("scsi%ld: %s: Firmware has NOT started\n",
|
||||
ha->host_no, __func__));
|
||||
|
@ -1526,7 +1526,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
|
||||
|
||||
int qla4xxx_request_irqs(struct scsi_qla_host *ha)
|
||||
{
|
||||
int ret;
|
||||
int ret = 0;
|
||||
int rval = QLA_ERROR;
|
||||
|
||||
if (is_qla40XX(ha))
|
||||
@ -1580,15 +1580,13 @@ try_msi:
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Prevent interrupts from falling back to INTx mode in cases where
|
||||
* interrupts cannot get acquired through MSI-X or MSI mode.
|
||||
*/
|
||||
try_intx:
|
||||
if (is_qla8022(ha)) {
|
||||
ql4_printk(KERN_WARNING, ha, "IRQ not attached -- %d.\n", ret);
|
||||
ql4_printk(KERN_WARNING, ha, "%s: ISP82xx Legacy interrupt not supported\n",
|
||||
__func__);
|
||||
goto irq_not_attached;
|
||||
}
|
||||
try_intx:
|
||||
|
||||
/* Trying INTx */
|
||||
ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
|
||||
IRQF_SHARED, DRIVER_NAME, ha);
|
||||
|
@ -2381,7 +2381,7 @@ int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config)
|
||||
ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
|
||||
__func__);
|
||||
rval = QLA_ERROR;
|
||||
goto exit_config_acb;
|
||||
goto exit_free_acb;
|
||||
}
|
||||
memcpy(ha->saved_acb, acb, acb_len);
|
||||
break;
|
||||
@ -2395,8 +2395,6 @@ int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config)
|
||||
}
|
||||
|
||||
memcpy(acb, ha->saved_acb, acb_len);
|
||||
kfree(ha->saved_acb);
|
||||
ha->saved_acb = NULL;
|
||||
|
||||
rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
|
||||
if (rval != QLA_SUCCESS)
|
||||
@ -2412,6 +2410,10 @@ exit_free_acb:
|
||||
dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), acb,
|
||||
acb_dma);
|
||||
exit_config_acb:
|
||||
if ((acb_config == ACB_CONFIG_SET) && ha->saved_acb) {
|
||||
kfree(ha->saved_acb);
|
||||
ha->saved_acb = NULL;
|
||||
}
|
||||
DEBUG2(ql4_printk(KERN_INFO, ha,
|
||||
"%s %s\n", __func__,
|
||||
rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
|
||||
|
@ -14,6 +14,7 @@
|
||||
|
||||
#include <asm-generic/io-64-nonatomic-lo-hi.h>
|
||||
|
||||
#define TIMEOUT_100_MS 100
|
||||
#define MASK(n) DMA_BIT_MASK(n)
|
||||
#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
|
||||
#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff))
|
||||
@ -1176,6 +1177,112 @@ qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* qla4_8xxx_ms_mem_write_128b - Writes data to MS/off-chip memory
|
||||
* @ha: Pointer to adapter structure
|
||||
* @addr: Flash address to write to
|
||||
* @data: Data to be written
|
||||
* @count: word_count to be written
|
||||
*
|
||||
* Return: On success return QLA_SUCCESS
|
||||
* On error return QLA_ERROR
|
||||
**/
|
||||
int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
|
||||
uint32_t *data, uint32_t count)
|
||||
{
|
||||
int i, j;
|
||||
uint32_t agt_ctrl;
|
||||
unsigned long flags;
|
||||
int ret_val = QLA_SUCCESS;
|
||||
|
||||
/* Only 128-bit aligned access */
|
||||
if (addr & 0xF) {
|
||||
ret_val = QLA_ERROR;
|
||||
goto exit_ms_mem_write;
|
||||
}
|
||||
|
||||
write_lock_irqsave(&ha->hw_lock, flags);
|
||||
|
||||
/* Write address */
|
||||
ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0);
|
||||
if (ret_val == QLA_ERROR) {
|
||||
ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n",
|
||||
__func__);
|
||||
goto exit_ms_mem_write_unlock;
|
||||
}
|
||||
|
||||
for (i = 0; i < count; i++, addr += 16) {
|
||||
if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
|
||||
QLA8XXX_ADDR_QDR_NET_MAX)) ||
|
||||
(QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
|
||||
QLA8XXX_ADDR_DDR_NET_MAX)))) {
|
||||
ret_val = QLA_ERROR;
|
||||
goto exit_ms_mem_write_unlock;
|
||||
}
|
||||
|
||||
ret_val = ha->isp_ops->wr_reg_indirect(ha,
|
||||
MD_MIU_TEST_AGT_ADDR_LO,
|
||||
addr);
|
||||
/* Write data */
|
||||
ret_val |= ha->isp_ops->wr_reg_indirect(ha,
|
||||
MD_MIU_TEST_AGT_WRDATA_LO,
|
||||
*data++);
|
||||
ret_val |= ha->isp_ops->wr_reg_indirect(ha,
|
||||
MD_MIU_TEST_AGT_WRDATA_HI,
|
||||
*data++);
|
||||
ret_val |= ha->isp_ops->wr_reg_indirect(ha,
|
||||
MD_MIU_TEST_AGT_WRDATA_ULO,
|
||||
*data++);
|
||||
ret_val |= ha->isp_ops->wr_reg_indirect(ha,
|
||||
MD_MIU_TEST_AGT_WRDATA_UHI,
|
||||
*data++);
|
||||
if (ret_val == QLA_ERROR) {
|
||||
ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n",
|
||||
__func__);
|
||||
goto exit_ms_mem_write_unlock;
|
||||
}
|
||||
|
||||
/* Check write status */
|
||||
ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
|
||||
MIU_TA_CTL_WRITE_ENABLE);
|
||||
ret_val |= ha->isp_ops->wr_reg_indirect(ha,
|
||||
MD_MIU_TEST_AGT_CTRL,
|
||||
MIU_TA_CTL_WRITE_START);
|
||||
if (ret_val == QLA_ERROR) {
|
||||
ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n",
|
||||
__func__);
|
||||
goto exit_ms_mem_write_unlock;
|
||||
}
|
||||
|
||||
for (j = 0; j < MAX_CTL_CHECK; j++) {
|
||||
ret_val = ha->isp_ops->rd_reg_indirect(ha,
|
||||
MD_MIU_TEST_AGT_CTRL,
|
||||
&agt_ctrl);
|
||||
if (ret_val == QLA_ERROR) {
|
||||
ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n",
|
||||
__func__);
|
||||
goto exit_ms_mem_write_unlock;
|
||||
}
|
||||
if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
|
||||
break;
|
||||
}
|
||||
|
||||
/* Status check failed */
|
||||
if (j >= MAX_CTL_CHECK) {
|
||||
printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n",
|
||||
__func__);
|
||||
ret_val = QLA_ERROR;
|
||||
goto exit_ms_mem_write_unlock;
|
||||
}
|
||||
}
|
||||
|
||||
exit_ms_mem_write_unlock:
|
||||
write_unlock_irqrestore(&ha->hw_lock, flags);
|
||||
|
||||
exit_ms_mem_write:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
static int
|
||||
qla4_82xx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
|
||||
{
|
||||
@ -1714,6 +1821,101 @@ void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha)
|
||||
qla4_82xx_rom_unlock(ha);
|
||||
}
|
||||
|
||||
static uint32_t ql4_84xx_poll_wait_for_ready(struct scsi_qla_host *ha,
|
||||
uint32_t addr1, uint32_t mask)
|
||||
{
|
||||
unsigned long timeout;
|
||||
uint32_t rval = QLA_SUCCESS;
|
||||
uint32_t temp;
|
||||
|
||||
timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
|
||||
do {
|
||||
ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
|
||||
if ((temp & mask) != 0)
|
||||
break;
|
||||
|
||||
if (time_after_eq(jiffies, timeout)) {
|
||||
ql4_printk(KERN_INFO, ha, "Error in processing rdmdio entry\n");
|
||||
return QLA_ERROR;
|
||||
}
|
||||
} while (1);
|
||||
|
||||
return rval;
|
||||
}
|
||||
|
||||
uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha, uint32_t addr1,
|
||||
uint32_t addr3, uint32_t mask, uint32_t addr,
|
||||
uint32_t *data_ptr)
|
||||
{
|
||||
int rval = QLA_SUCCESS;
|
||||
uint32_t temp;
|
||||
uint32_t data;
|
||||
|
||||
rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
|
||||
if (rval)
|
||||
goto exit_ipmdio_rd_reg;
|
||||
|
||||
temp = (0x40000000 | addr);
|
||||
ha->isp_ops->wr_reg_indirect(ha, addr1, temp);
|
||||
|
||||
rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
|
||||
if (rval)
|
||||
goto exit_ipmdio_rd_reg;
|
||||
|
||||
ha->isp_ops->rd_reg_indirect(ha, addr3, &data);
|
||||
*data_ptr = data;
|
||||
|
||||
exit_ipmdio_rd_reg:
|
||||
return rval;
|
||||
}
|
||||
|
||||
|
||||
static uint32_t ql4_84xx_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *ha,
|
||||
uint32_t addr1,
|
||||
uint32_t addr2,
|
||||
uint32_t addr3,
|
||||
uint32_t mask)
|
||||
{
|
||||
unsigned long timeout;
|
||||
uint32_t temp;
|
||||
uint32_t rval = QLA_SUCCESS;
|
||||
|
||||
timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
|
||||
do {
|
||||
ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3, mask, addr2, &temp);
|
||||
if ((temp & 0x1) != 1)
|
||||
break;
|
||||
if (time_after_eq(jiffies, timeout)) {
|
||||
ql4_printk(KERN_INFO, ha, "Error in processing mdiobus idle\n");
|
||||
return QLA_ERROR;
|
||||
}
|
||||
} while (1);
|
||||
|
||||
return rval;
|
||||
}
|
||||
|
||||
static int ql4_84xx_ipmdio_wr_reg(struct scsi_qla_host *ha,
|
||||
uint32_t addr1, uint32_t addr3,
|
||||
uint32_t mask, uint32_t addr,
|
||||
uint32_t value)
|
||||
{
|
||||
int rval = QLA_SUCCESS;
|
||||
|
||||
rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
|
||||
if (rval)
|
||||
goto exit_ipmdio_wr_reg;
|
||||
|
||||
ha->isp_ops->wr_reg_indirect(ha, addr3, value);
|
||||
ha->isp_ops->wr_reg_indirect(ha, addr1, addr);
|
||||
|
||||
rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
|
||||
if (rval)
|
||||
goto exit_ipmdio_wr_reg;
|
||||
|
||||
exit_ipmdio_wr_reg:
|
||||
return rval;
|
||||
}
|
||||
|
||||
static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
|
||||
struct qla8xxx_minidump_entry_hdr *entry_hdr,
|
||||
uint32_t **d_ptr)
|
||||
@ -1822,7 +2024,7 @@ error_exit:
|
||||
return rval;
|
||||
}
|
||||
|
||||
static int qla4_83xx_minidump_pex_dma_read(struct scsi_qla_host *ha,
|
||||
static int qla4_8xxx_minidump_pex_dma_read(struct scsi_qla_host *ha,
|
||||
struct qla8xxx_minidump_entry_hdr *entry_hdr,
|
||||
uint32_t **d_ptr)
|
||||
{
|
||||
@ -1899,11 +2101,11 @@ static int qla4_83xx_minidump_pex_dma_read(struct scsi_qla_host *ha,
|
||||
dma_desc.cmd.read_data_size = size;
|
||||
|
||||
/* Prepare: Write pex-dma descriptor to MS memory. */
|
||||
rval = qla4_83xx_ms_mem_write_128b(ha,
|
||||
rval = qla4_8xxx_ms_mem_write_128b(ha,
|
||||
(uint64_t)m_hdr->desc_card_addr,
|
||||
(uint32_t *)&dma_desc,
|
||||
(sizeof(struct qla4_83xx_pex_dma_descriptor)/16));
|
||||
if (rval == -1) {
|
||||
if (rval != QLA_SUCCESS) {
|
||||
ql4_printk(KERN_INFO, ha,
|
||||
"%s: Error writing rdmem-dma-init to MS !!!\n",
|
||||
__func__);
|
||||
@ -2359,17 +2561,10 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
|
||||
uint32_t *data_ptr = *d_ptr;
|
||||
int rval = QLA_SUCCESS;
|
||||
|
||||
if (is_qla8032(ha) || is_qla8042(ha)) {
|
||||
rval = qla4_83xx_minidump_pex_dma_read(ha, entry_hdr,
|
||||
&data_ptr);
|
||||
if (rval != QLA_SUCCESS) {
|
||||
rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
|
||||
&data_ptr);
|
||||
}
|
||||
} else {
|
||||
rval = qla4_8xxx_minidump_pex_dma_read(ha, entry_hdr, &data_ptr);
|
||||
if (rval != QLA_SUCCESS)
|
||||
rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
|
||||
&data_ptr);
|
||||
}
|
||||
*d_ptr = data_ptr;
|
||||
return rval;
|
||||
}
|
||||
@ -2440,6 +2635,227 @@ exit_process_pollrd:
|
||||
return rval;
|
||||
}
|
||||
|
||||
static uint32_t qla4_84xx_minidump_process_rddfe(struct scsi_qla_host *ha,
|
||||
struct qla8xxx_minidump_entry_hdr *entry_hdr,
|
||||
uint32_t **d_ptr)
|
||||
{
|
||||
int loop_cnt;
|
||||
uint32_t addr1, addr2, value, data, temp, wrval;
|
||||
uint8_t stride, stride2;
|
||||
uint16_t count;
|
||||
uint32_t poll, mask, data_size, modify_mask;
|
||||
uint32_t wait_count = 0;
|
||||
uint32_t *data_ptr = *d_ptr;
|
||||
struct qla8044_minidump_entry_rddfe *rddfe;
|
||||
uint32_t rval = QLA_SUCCESS;
|
||||
|
||||
rddfe = (struct qla8044_minidump_entry_rddfe *)entry_hdr;
|
||||
addr1 = le32_to_cpu(rddfe->addr_1);
|
||||
value = le32_to_cpu(rddfe->value);
|
||||
stride = le32_to_cpu(rddfe->stride);
|
||||
stride2 = le32_to_cpu(rddfe->stride2);
|
||||
count = le32_to_cpu(rddfe->count);
|
||||
|
||||
poll = le32_to_cpu(rddfe->poll);
|
||||
mask = le32_to_cpu(rddfe->mask);
|
||||
modify_mask = le32_to_cpu(rddfe->modify_mask);
|
||||
data_size = le32_to_cpu(rddfe->data_size);
|
||||
|
||||
addr2 = addr1 + stride;
|
||||
|
||||
for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) {
|
||||
ha->isp_ops->wr_reg_indirect(ha, addr1, (0x40000000 | value));
|
||||
|
||||
wait_count = 0;
|
||||
while (wait_count < poll) {
|
||||
ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
|
||||
if ((temp & mask) != 0)
|
||||
break;
|
||||
wait_count++;
|
||||
}
|
||||
|
||||
if (wait_count == poll) {
|
||||
ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__);
|
||||
rval = QLA_ERROR;
|
||||
goto exit_process_rddfe;
|
||||
} else {
|
||||
ha->isp_ops->rd_reg_indirect(ha, addr2, &temp);
|
||||
temp = temp & modify_mask;
|
||||
temp = (temp | ((loop_cnt << 16) | loop_cnt));
|
||||
wrval = ((temp << 16) | temp);
|
||||
|
||||
ha->isp_ops->wr_reg_indirect(ha, addr2, wrval);
|
||||
ha->isp_ops->wr_reg_indirect(ha, addr1, value);
|
||||
|
||||
wait_count = 0;
|
||||
while (wait_count < poll) {
|
||||
ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
|
||||
if ((temp & mask) != 0)
|
||||
break;
|
||||
wait_count++;
|
||||
}
|
||||
if (wait_count == poll) {
|
||||
ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
|
||||
__func__);
|
||||
rval = QLA_ERROR;
|
||||
goto exit_process_rddfe;
|
||||
}
|
||||
|
||||
ha->isp_ops->wr_reg_indirect(ha, addr1,
|
||||
((0x40000000 | value) +
|
||||
stride2));
|
||||
wait_count = 0;
|
||||
while (wait_count < poll) {
|
||||
ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
|
||||
if ((temp & mask) != 0)
|
||||
break;
|
||||
wait_count++;
|
||||
}
|
||||
|
||||
if (wait_count == poll) {
|
||||
ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
|
||||
__func__);
|
||||
rval = QLA_ERROR;
|
||||
goto exit_process_rddfe;
|
||||
}
|
||||
|
||||
ha->isp_ops->rd_reg_indirect(ha, addr2, &data);
|
||||
|
||||
*data_ptr++ = cpu_to_le32(wrval);
|
||||
*data_ptr++ = cpu_to_le32(data);
|
||||
}
|
||||
}
|
||||
|
||||
*d_ptr = data_ptr;
|
||||
exit_process_rddfe:
|
||||
return rval;
|
||||
}
|
||||
|
||||
static uint32_t qla4_84xx_minidump_process_rdmdio(struct scsi_qla_host *ha,
|
||||
struct qla8xxx_minidump_entry_hdr *entry_hdr,
|
||||
uint32_t **d_ptr)
|
||||
{
|
||||
int rval = QLA_SUCCESS;
|
||||
uint32_t addr1, addr2, value1, value2, data, selval;
|
||||
uint8_t stride1, stride2;
|
||||
uint32_t addr3, addr4, addr5, addr6, addr7;
|
||||
uint16_t count, loop_cnt;
|
||||
uint32_t poll, mask;
|
||||
uint32_t *data_ptr = *d_ptr;
|
||||
struct qla8044_minidump_entry_rdmdio *rdmdio;
|
||||
|
||||
rdmdio = (struct qla8044_minidump_entry_rdmdio *)entry_hdr;
|
||||
addr1 = le32_to_cpu(rdmdio->addr_1);
|
||||
addr2 = le32_to_cpu(rdmdio->addr_2);
|
||||
value1 = le32_to_cpu(rdmdio->value_1);
|
||||
stride1 = le32_to_cpu(rdmdio->stride_1);
|
||||
stride2 = le32_to_cpu(rdmdio->stride_2);
|
||||
count = le32_to_cpu(rdmdio->count);
|
||||
|
||||
poll = le32_to_cpu(rdmdio->poll);
|
||||
mask = le32_to_cpu(rdmdio->mask);
|
||||
value2 = le32_to_cpu(rdmdio->value_2);
|
||||
|
||||
addr3 = addr1 + stride1;
|
||||
|
||||
for (loop_cnt = 0; loop_cnt < count; loop_cnt++) {
|
||||
rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2,
|
||||
addr3, mask);
|
||||
if (rval)
|
||||
goto exit_process_rdmdio;
|
||||
|
||||
addr4 = addr2 - stride1;
|
||||
rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr4,
|
||||
value2);
|
||||
if (rval)
|
||||
goto exit_process_rdmdio;
|
||||
|
||||
addr5 = addr2 - (2 * stride1);
|
||||
rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr5,
|
||||
value1);
|
||||
if (rval)
|
||||
goto exit_process_rdmdio;
|
||||
|
||||
addr6 = addr2 - (3 * stride1);
|
||||
rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask,
|
||||
addr6, 0x2);
|
||||
if (rval)
|
||||
goto exit_process_rdmdio;
|
||||
|
||||
rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2,
|
||||
addr3, mask);
|
||||
if (rval)
|
||||
goto exit_process_rdmdio;
|
||||
|
||||
addr7 = addr2 - (4 * stride1);
|
||||
rval = ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3,
|
||||
mask, addr7, &data);
|
||||
if (rval)
|
||||
goto exit_process_rdmdio;
|
||||
|
||||
selval = (value2 << 18) | (value1 << 2) | 2;
|
||||
|
||||
stride2 = le32_to_cpu(rdmdio->stride_2);
|
||||
*data_ptr++ = cpu_to_le32(selval);
|
||||
*data_ptr++ = cpu_to_le32(data);
|
||||
|
||||
value1 = value1 + stride2;
|
||||
*d_ptr = data_ptr;
|
||||
}
|
||||
|
||||
exit_process_rdmdio:
|
||||
return rval;
|
||||
}
|
||||
|
||||
static uint32_t qla4_84xx_minidump_process_pollwr(struct scsi_qla_host *ha,
|
||||
struct qla8xxx_minidump_entry_hdr *entry_hdr,
|
||||
uint32_t **d_ptr)
|
||||
{
|
||||
uint32_t addr1, addr2, value1, value2, poll, mask, r_value;
|
||||
struct qla8044_minidump_entry_pollwr *pollwr_hdr;
|
||||
uint32_t wait_count = 0;
|
||||
uint32_t rval = QLA_SUCCESS;
|
||||
|
||||
pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr;
|
||||
addr1 = le32_to_cpu(pollwr_hdr->addr_1);
|
||||
addr2 = le32_to_cpu(pollwr_hdr->addr_2);
|
||||
value1 = le32_to_cpu(pollwr_hdr->value_1);
|
||||
value2 = le32_to_cpu(pollwr_hdr->value_2);
|
||||
|
||||
poll = le32_to_cpu(pollwr_hdr->poll);
|
||||
mask = le32_to_cpu(pollwr_hdr->mask);
|
||||
|
||||
while (wait_count < poll) {
|
||||
ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value);
|
||||
|
||||
if ((r_value & poll) != 0)
|
||||
break;
|
||||
|
||||
wait_count++;
|
||||
}
|
||||
|
||||
if (wait_count == poll) {
|
||||
ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__);
|
||||
rval = QLA_ERROR;
|
||||
goto exit_process_pollwr;
|
||||
}
|
||||
|
||||
ha->isp_ops->wr_reg_indirect(ha, addr2, value2);
|
||||
ha->isp_ops->wr_reg_indirect(ha, addr1, value1);
|
||||
|
||||
wait_count = 0;
|
||||
while (wait_count < poll) {
|
||||
ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value);
|
||||
|
||||
if ((r_value & poll) != 0)
|
||||
break;
|
||||
wait_count++;
|
||||
}
|
||||
|
||||
exit_process_pollwr:
|
||||
return rval;
|
||||
}
|
||||
|
||||
static void qla83xx_minidump_process_rdmux2(struct scsi_qla_host *ha,
|
||||
struct qla8xxx_minidump_entry_hdr *entry_hdr,
|
||||
uint32_t **d_ptr)
|
||||
@ -2753,6 +3169,24 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
|
||||
if (rval != QLA_SUCCESS)
|
||||
qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
|
||||
break;
|
||||
case QLA8044_RDDFE:
|
||||
rval = qla4_84xx_minidump_process_rddfe(ha, entry_hdr,
|
||||
&data_ptr);
|
||||
if (rval != QLA_SUCCESS)
|
||||
qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
|
||||
break;
|
||||
case QLA8044_RDMDIO:
|
||||
rval = qla4_84xx_minidump_process_rdmdio(ha, entry_hdr,
|
||||
&data_ptr);
|
||||
if (rval != QLA_SUCCESS)
|
||||
qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
|
||||
break;
|
||||
case QLA8044_POLLWR:
|
||||
rval = qla4_84xx_minidump_process_pollwr(ha, entry_hdr,
|
||||
&data_ptr);
|
||||
if (rval != QLA_SUCCESS)
|
||||
qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
|
||||
break;
|
||||
case QLA8XXX_RDNOP:
|
||||
default:
|
||||
qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
|
||||
|
@ -858,6 +858,9 @@ struct crb_addr_pair {
|
||||
#define QLA83XX_POLLRD 35
|
||||
#define QLA83XX_RDMUX2 36
|
||||
#define QLA83XX_POLLRDMWR 37
|
||||
#define QLA8044_RDDFE 38
|
||||
#define QLA8044_RDMDIO 39
|
||||
#define QLA8044_POLLWR 40
|
||||
#define QLA8XXX_RDROM 71
|
||||
#define QLA8XXX_RDMEM 72
|
||||
#define QLA8XXX_CNTRL 98
|
||||
|
@ -83,12 +83,12 @@ MODULE_PARM_DESC(ql4xsess_recovery_tmo,
|
||||
" Target Session Recovery Timeout.\n"
|
||||
"\t\t Default: 120 sec.");
|
||||
|
||||
int ql4xmdcapmask = 0x1F;
|
||||
int ql4xmdcapmask = 0;
|
||||
module_param(ql4xmdcapmask, int, S_IRUGO);
|
||||
MODULE_PARM_DESC(ql4xmdcapmask,
|
||||
" Set the Minidump driver capture mask level.\n"
|
||||
"\t\t Default is 0x1F.\n"
|
||||
"\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");
|
||||
"\t\t Default is 0 (firmware default capture mask)\n"
|
||||
"\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF");
|
||||
|
||||
int ql4xenablemd = 1;
|
||||
module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
|
||||
@ -1742,6 +1742,9 @@ static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
|
||||
struct sockaddr *dst_addr;
|
||||
struct scsi_qla_host *ha;
|
||||
|
||||
if (!qla_ep)
|
||||
return -ENOTCONN;
|
||||
|
||||
ha = to_qla_host(qla_ep->host);
|
||||
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
|
||||
ha->host_no));
|
||||
@ -1749,9 +1752,6 @@ static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
|
||||
switch (param) {
|
||||
case ISCSI_PARAM_CONN_PORT:
|
||||
case ISCSI_PARAM_CONN_ADDRESS:
if (!qla_ep)
return -ENOTCONN;

dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
if (!dst_addr)
return -ENOTCONN;
@ -2879,7 +2879,6 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
struct iscsi_conn *conn;
struct qla_conn *qla_conn;
struct sockaddr *dst_addr;
int len = 0;

conn = cls_conn->dd_data;
qla_conn = conn->dd_data;
@ -2893,9 +2892,6 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
default:
return iscsi_conn_get_param(cls_conn, param, buf);
}

return len;

}

int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
@ -3569,14 +3565,13 @@ static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess,
if (test_bit(OPT_IPV6_DEVICE, &options)) {
conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos;

conn->link_local_ipv6_addr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
conn->link_local_ipv6_addr = kmemdup(
fw_ddb_entry->link_local_ipv6_addr,
IPv6_ADDR_LEN, GFP_KERNEL);
if (!conn->link_local_ipv6_addr) {
rc = -ENOMEM;
goto exit_copy;
}

memcpy(conn->link_local_ipv6_addr,
fw_ddb_entry->link_local_ipv6_addr, IPv6_ADDR_LEN);
} else {
conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
}
@ -4565,6 +4560,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) ||
test_bit(DPC_AEN, &ha->dpc_flags)) {
DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
" - dpc flags = 0x%lx\n",
@ -4862,9 +4858,6 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
ha->host_no, __func__));
status = ha->isp_ops->reset_firmware(ha);
if (status == QLA_SUCCESS) {
if (!test_bit(AF_FW_RECOVERY, &ha->flags))
qla4xxx_cmd_wait(ha);

ha->isp_ops->disable_intrs(ha);
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
@ -5432,6 +5425,11 @@ dpc_post_reset_ha:
qla4xxx_relogin_all_devices(ha);
}
}
if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) {
if (qla4xxx_sysfs_ddb_export(ha))
ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n",
__func__);
}
}

/**
@ -8409,7 +8407,7 @@ exit_ddb_del:
*
* Export the firmware DDB for all send targets and normal targets to sysfs.
**/
static int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
{
struct dev_db_entry *fw_ddb_entry = NULL;
dma_addr_t fw_ddb_entry_dma;
@ -8847,11 +8845,8 @@ skip_retry_init:
ql4_printk(KERN_ERR, ha,
"%s: No iSCSI boot target configured\n", __func__);

if (qla4xxx_sysfs_ddb_export(ha))
ql4_printk(KERN_ERR, ha,
"%s: Error exporting ddb to sysfs\n", __func__);

/* Perform the build ddb list and login to each */
set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags);
/* Perform the build ddb list and login to each */
qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
qla4xxx_wait_login_resp_boot_tgt(ha);

@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/

#define QLA4XXX_DRIVER_VERSION "5.04.00-k4"
#define QLA4XXX_DRIVER_VERSION "5.04.00-k6"
@ -130,6 +130,7 @@ static const char * scsi_debug_version_date = "20100324";
#define SCSI_DEBUG_OPT_DIF_ERR 32
#define SCSI_DEBUG_OPT_DIX_ERR 64
#define SCSI_DEBUG_OPT_MAC_TIMEOUT 128
#define SCSI_DEBUG_OPT_SHORT_TRANSFER 256
/* When "every_nth" > 0 then modulo "every_nth" commands:
* - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
* - a RECOVERED_ERROR is simulated on successful read and write
@ -3583,6 +3584,7 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
int inj_transport = 0;
int inj_dif = 0;
int inj_dix = 0;
int inj_short = 0;
int delay_override = 0;
int unmap = 0;

@ -3628,6 +3630,8 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
inj_dif = 1; /* to reads and writes below */
else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
inj_dix = 1; /* to reads and writes below */
else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & scsi_debug_opts)
inj_short = 1;
}

if (devip->wlun) {
@ -3744,6 +3748,10 @@ read:
if (scsi_debug_fake_rw)
break;
get_data_transfer_info(cmd, &lba, &num, &ei_lba);

if (inj_short)
num /= 2;

errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
if (inj_recovered && (0 == errsts)) {
mk_sense_buffer(devip, RECOVERED_ERROR,
@ -364,7 +364,7 @@ static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged)
return( 0 );
if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >=
TagAlloc[cmd->device->id][cmd->device->lun].queue_size ) {
TAG_PRINTK( "scsi%d: target %d lun %d: no free tags\n",
dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n",
H_NO(cmd), cmd->device->id, cmd->device->lun );
return( 1 );
}
@ -388,7 +388,7 @@ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)
!setup_use_tagged_queuing || !cmd->device->tagged_supported) {
cmd->tag = TAG_NONE;
hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
TAG_PRINTK( "scsi%d: target %d lun %d now allocated by untagged "
dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged "
"command\n", H_NO(cmd), cmd->device->id, cmd->device->lun );
}
else {
@ -397,7 +397,7 @@ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)
cmd->tag = find_first_zero_bit( &ta->allocated, MAX_TAGS );
set_bit( cmd->tag, &ta->allocated );
ta->nr_allocated++;
TAG_PRINTK( "scsi%d: using tag %d for target %d lun %d "
dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d "
"(now %d tags in use)\n",
H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun,
ta->nr_allocated );
@ -415,7 +415,7 @@ static void cmd_free_tag(struct scsi_cmnd *cmd)

if (cmd->tag == TAG_NONE) {
hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
TAG_PRINTK( "scsi%d: target %d lun %d untagged cmd finished\n",
dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n",
H_NO(cmd), cmd->device->id, cmd->device->lun );
}
else if (cmd->tag >= MAX_TAGS) {
@ -426,7 +426,7 @@ static void cmd_free_tag(struct scsi_cmnd *cmd)
TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
clear_bit( cmd->tag, &ta->allocated );
ta->nr_allocated--;
TAG_PRINTK( "scsi%d: freed tag %d for target %d lun %d\n",
dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n",
H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun );
}
}
@ -484,7 +484,7 @@ static __inline__ void initialize_SCp(struct scsi_cmnd *cmd)

#include <linux/delay.h>

#if 1
#if NDEBUG
static struct {
unsigned char mask;
const char * name;}
@ -572,12 +572,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
}
}

#else /* !NDEBUG */

/* dummies... */
__inline__ void NCR5380_print(struct Scsi_Host *instance) { };
__inline__ void NCR5380_print_phase(struct Scsi_Host *instance) { };

#endif

/*
@ -618,7 +612,7 @@ static inline void NCR5380_all_init (void)
{
static int done = 0;
if (!done) {
INI_PRINTK("scsi : NCR5380_all_init()\n");
dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n");
done = 1;
}
}
@ -681,8 +675,8 @@ static void NCR5380_print_status(struct Scsi_Host *instance)
Scsi_Cmnd *ptr;
unsigned long flags;

NCR_PRINT(NDEBUG_ANY);
NCR_PRINT_PHASE(NDEBUG_ANY);
NCR5380_dprint(NDEBUG_ANY, instance);
NCR5380_dprint_phase(NDEBUG_ANY, instance);

hostdata = (struct NCR5380_hostdata *)instance->hostdata;

@ -928,7 +922,7 @@ static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd,

local_irq_restore(flags);

QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd),
dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd),
(cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");

/* If queue_command() is called from an interrupt (real one or bottom
@ -998,7 +992,7 @@ static void NCR5380_main (struct work_struct *bl)
done = 1;

if (!hostdata->connected) {
MAIN_PRINTK( "scsi%d: not connected\n", HOSTNO );
dprintk(NDEBUG_MAIN, "scsi%d: not connected\n", HOSTNO );
/*
* Search through the issue_queue for a command destined
* for a target that's not busy.
@ -1012,12 +1006,8 @@ static void NCR5380_main (struct work_struct *bl)
for (tmp = (struct scsi_cmnd *) hostdata->issue_queue,
prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp) ) {

#if (NDEBUG & NDEBUG_LISTS)
if (prev != tmp)
printk("MAIN tmp=%p target=%d busy=%d lun=%d\n",
tmp, tmp->target, hostdata->busy[tmp->target],
tmp->lun);
#endif
dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun);
/* When we find one, remove it from the issue queue. */
/* ++guenther: possible race with Falcon locking */
if (
@ -1047,9 +1037,9 @@ static void NCR5380_main (struct work_struct *bl)
* On failure, we must add the command back to the
* issue queue so we can keep trying.
*/
MAIN_PRINTK("scsi%d: main(): command for target %d "
dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d "
"lun %d removed from issue_queue\n",
HOSTNO, tmp->target, tmp->lun);
HOSTNO, tmp->device->id, tmp->device->lun);
/*
* REQUEST SENSE commands are issued without tagged
* queueing, even on SCSI-II devices because the
@ -1076,7 +1066,7 @@ static void NCR5380_main (struct work_struct *bl)
cmd_free_tag( tmp );
#endif
local_irq_restore(flags);
MAIN_PRINTK("scsi%d: main(): select() failed, "
dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, "
"returned to issue_queue\n", HOSTNO);
if (hostdata->connected)
break;
@ -1090,10 +1080,10 @@ static void NCR5380_main (struct work_struct *bl)
#endif
) {
local_irq_restore(flags);
MAIN_PRINTK("scsi%d: main: performing information transfer\n",
dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n",
HOSTNO);
NCR5380_information_transfer(instance);
MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO);
dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO);
done = 0;
}
} while (!done);
@ -1130,7 +1120,7 @@ static void NCR5380_dma_complete( struct Scsi_Host *instance )
return;
}

DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),
NCR5380_read(STATUS_REG));

@ -1189,27 +1179,27 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id)
int done = 1, handled = 0;
unsigned char basr;

INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO);
dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO);

/* Look for pending interrupts */
basr = NCR5380_read(BUS_AND_STATUS_REG);
INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr);
dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr);
/* dispatch to appropriate routine if found and done=0 */
if (basr & BASR_IRQ) {
NCR_PRINT(NDEBUG_INTR);
NCR5380_dprint(NDEBUG_INTR, instance);
if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) {
done = 0;
// ENABLE_IRQ();
INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO);
dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO);
NCR5380_reselect(instance);
(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
}
else if (basr & BASR_PARITY_ERROR) {
INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO);
dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO);
(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
}
else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO);
dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO);
(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
}
else {
@ -1229,7 +1219,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id)
((basr & BASR_END_DMA_TRANSFER) ||
!(basr & BASR_PHASE_MATCH))) {

INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
NCR5380_dma_complete( instance );
done = 0;
// ENABLE_IRQ();
@ -1238,7 +1228,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id)
{
/* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */
if (basr & BASR_PHASE_MATCH)
INT_PRINTK("scsi%d: unknown interrupt, "
dprintk(NDEBUG_INTR, "scsi%d: unknown interrupt, "
"BASR 0x%x, MR 0x%x, SR 0x%x\n",
HOSTNO, basr, NCR5380_read(MODE_REG),
NCR5380_read(STATUS_REG));
@ -1262,7 +1252,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id)
}

if (!done) {
INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO);
dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO);
/* Put a call to NCR5380_main() on the queue... */
queue_main();
}
@ -1338,8 +1328,8 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
unsigned long flags;

hostdata->restart_select = 0;
NCR_PRINT(NDEBUG_ARBITRATION);
ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO,
NCR5380_dprint(NDEBUG_ARBITRATION, instance);
dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO,
instance->this_id);

/*
@ -1385,7 +1375,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
&& !hostdata->connected);
#endif

ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO);
dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO);

if (hostdata->connected) {
NCR5380_write(MODE_REG, MR_BASE);
@ -1406,7 +1396,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
hostdata->connected) {
NCR5380_write(MODE_REG, MR_BASE);
ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
HOSTNO);
return -1;
}
@ -1421,7 +1411,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
hostdata->connected) {
NCR5380_write(MODE_REG, MR_BASE);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
HOSTNO);
return -1;
}
@ -1444,7 +1434,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
return -1;
}

ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO);
dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO);

/*
* Now that we have won arbitration, start Selection process, asserting
@ -1504,7 +1494,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,

udelay(1);

SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);
dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);

/*
* The SCSI specification calls for a 250 ms timeout for the actual
@ -1559,7 +1549,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO);
if (hostdata->restart_select)
printk(KERN_NOTICE "\trestart select\n");
NCR_PRINT(NDEBUG_ANY);
NCR5380_dprint(NDEBUG_ANY, instance);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
return -1;
}
@ -1572,7 +1562,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
#endif
cmd->scsi_done(cmd);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO);
dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
return 0;
}
@ -1597,7 +1587,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
/* Wait for start of REQ/ACK handshake */
while (!(NCR5380_read(STATUS_REG) & SR_REQ));

SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
HOSTNO, cmd->device->id);
tmp[0] = IDENTIFY(1, cmd->device->lun);

@ -1617,7 +1607,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
data = tmp;
phase = PHASE_MSGOUT;
NCR5380_transfer_pio(instance, &phase, &len, &data);
SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO);
dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO);
/* XXX need to handle errors here */
hostdata->connected = cmd;
#ifndef SUPPORT_TAGS
@ -1680,12 +1670,12 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance,
*/
while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ));

HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO);
dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO);

/* Check for phase mismatch */
if ((tmp & PHASE_MASK) != p) {
PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO);
NCR_PRINT_PHASE(NDEBUG_PIO);
dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO);
NCR5380_dprint_phase(NDEBUG_PIO, instance);
break;
}

@ -1708,24 +1698,24 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance,
if (!((p & SR_MSG) && c > 1)) {
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
ICR_ASSERT_DATA);
NCR_PRINT(NDEBUG_PIO);
NCR5380_dprint(NDEBUG_PIO, instance);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
ICR_ASSERT_DATA | ICR_ASSERT_ACK);
} else {
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
ICR_ASSERT_DATA | ICR_ASSERT_ATN);
NCR_PRINT(NDEBUG_PIO);
NCR5380_dprint(NDEBUG_PIO, instance);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
}
} else {
NCR_PRINT(NDEBUG_PIO);
NCR5380_dprint(NDEBUG_PIO, instance);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
}

while (NCR5380_read(STATUS_REG) & SR_REQ);

HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO);
dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO);

/*
* We have several special cases to consider during REQ/ACK handshaking :
@ -1746,7 +1736,7 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance,
}
} while (--c);

PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c);
dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c);

*count = c;
*data = d;
@ -1854,7 +1844,7 @@ static int NCR5380_transfer_dma( struct Scsi_Host *instance,
}
hostdata->dma_len = c;

DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n",
dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",
HOSTNO, (p & SR_IO) ? "reading" : "writing",
c, (p & SR_IO) ? "to" : "from", *data);

@ -1931,7 +1921,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
phase = (tmp & PHASE_MASK);
if (phase != old_phase) {
old_phase = phase;
NCR_PRINT_PHASE(NDEBUG_INFORMATION);
NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
}

if(phase == PHASE_CMDOUT) {
@ -1996,7 +1986,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
--cmd->SCp.buffers_residual;
cmd->SCp.this_residual = cmd->SCp.buffer->length;
cmd->SCp.ptr = SGADDR(cmd->SCp.buffer);
INF_PRINTK("scsi%d: %d bytes and %d buffers left\n",
dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n",
HOSTNO, cmd->SCp.this_residual,
cmd->SCp.buffers_residual);
}
@ -2088,7 +2078,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
/* Accept message by clearing ACK */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

LNK_PRINTK("scsi%d: target %d lun %d linked command "
dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked command "
"complete.\n", HOSTNO, cmd->device->id, cmd->device->lun);

/* Enable reselect interrupts */
@ -2113,7 +2103,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
* and don't free it! */
cmd->next_link->tag = cmd->tag;
cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
LNK_PRINTK("scsi%d: target %d lun %d linked request "
dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked request "
"done, calling scsi_done().\n",
HOSTNO, cmd->device->id, cmd->device->lun);
#ifdef NCR5380_STATS
@ -2128,7 +2118,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
/* Accept message by clearing ACK */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
hostdata->connected = NULL;
QU_PRINTK("scsi%d: command for target %d, lun %d "
dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %d "
"completed\n", HOSTNO, cmd->device->id, cmd->device->lun);
#ifdef SUPPORT_TAGS
cmd_free_tag( cmd );
@ -2142,7 +2132,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
/* ++Andreas: the mid level code knows about
QUEUE_FULL now. */
TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
TAG_PRINTK("scsi%d: target %d lun %d returned "
dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d returned "
"QUEUE_FULL after %d commands\n",
HOSTNO, cmd->device->id, cmd->device->lun,
ta->nr_allocated);
@ -2186,7 +2176,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
if ((cmd->cmnd[0] != REQUEST_SENSE) &&
(status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
ASEN_PRINTK("scsi%d: performing request sense\n",
dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n",
HOSTNO);
/* this is initialized from initialize_SCp
cmd->SCp.buffer = NULL;
@ -2198,7 +2188,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
SET_NEXT(cmd, hostdata->issue_queue);
hostdata->issue_queue = (struct scsi_cmnd *) cmd;
local_irq_restore(flags);
QU_PRINTK("scsi%d: REQUEST SENSE added to head of "
dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of "
"issue queue\n", H_NO(cmd));
} else
#endif /* def AUTOSENSE */
@ -2238,7 +2228,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
cmd->device->tagged_supported = 0;
hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
cmd->tag = TAG_NONE;
TAG_PRINTK("scsi%d: target %d lun %d rejected "
dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d rejected "
"QUEUE_TAG message; tagged queuing "
"disabled\n",
HOSTNO, cmd->device->id, cmd->device->lun);
@ -2255,7 +2245,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
hostdata->connected = NULL;
hostdata->disconnected_queue = cmd;
local_irq_restore(flags);
QU_PRINTK("scsi%d: command for target %d lun %d was "
dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %d was "
"moved from connected to the "
"disconnected_queue\n", HOSTNO,
cmd->device->id, cmd->device->lun);
@ -2308,13 +2298,13 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
/* Accept first byte by clearing ACK */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO);
dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO);

len = 2;
data = extended_msg + 1;
phase = PHASE_MSGIN;
NCR5380_transfer_pio(instance, &phase, &len, &data);
EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO,
dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO,
(int)extended_msg[1], (int)extended_msg[2]);

if (!len && extended_msg[1] <=
@ -2326,7 +2316,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
phase = PHASE_MSGIN;

NCR5380_transfer_pio(instance, &phase, &len, &data);
EXT_PRINTK("scsi%d: message received, residual %d\n",
dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n",
HOSTNO, len);

switch (extended_msg[2]) {
@ -2416,7 +2406,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
break;
default:
printk("scsi%d: unknown phase\n", HOSTNO);
NCR_PRINT(NDEBUG_ANY);
NCR5380_dprint(NDEBUG_ANY, instance);
} /* switch(phase) */
} /* if (tmp * SR_REQ) */
} /* while (1) */
@ -2458,7 +2448,7 @@ static void NCR5380_reselect (struct Scsi_Host *instance)

target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);

RSL_PRINTK("scsi%d: reselect\n", HOSTNO);
dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO);

/*
* At this point, we have detected that our SCSI ID is on the bus,
@ -2580,14 +2570,14 @@ static void NCR5380_reselect (struct Scsi_Host *instance)
if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
msg[1] == SIMPLE_QUEUE_TAG)
tag = msg[2];
TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at "
dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at "
"reselection\n", HOSTNO, target_mask, lun, tag);
}
#endif

hostdata->connected = tmp;
RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
HOSTNO, tmp->target, tmp->lun, tmp->tag);
dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag);
}


@ -2622,7 +2612,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)

local_irq_save(flags);

ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
NCR5380_read(BUS_AND_STATUS_REG),
NCR5380_read(STATUS_REG));

@ -2635,7 +2625,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)

if (hostdata->connected == cmd) {

ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO);
dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO);
/*
* We should perform BSY checking, and make sure we haven't slipped
* into BUS FREE.
@ -2664,11 +2654,11 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
#endif
local_irq_restore(flags);
cmd->scsi_done(cmd);
return SCSI_ABORT_SUCCESS;
return SUCCESS;
} else {
/* local_irq_restore(flags); */
printk("scsi%d: abort of connected command failed!\n", HOSTNO);
return SCSI_ABORT_ERROR;
return FAILED;
}
}
#endif
@ -2686,12 +2676,12 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
SET_NEXT(tmp, NULL);
tmp->result = DID_ABORT << 16;
local_irq_restore(flags);
ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n",
dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n",
HOSTNO);
/* Tagged queuing note: no tag to free here, hasn't been assigned
* yet... */
tmp->scsi_done(tmp);
return SCSI_ABORT_SUCCESS;
return SUCCESS;
}

/*
@ -2707,8 +2697,8 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)

if (hostdata->connected) {
local_irq_restore(flags);
ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO);
return SCSI_ABORT_SNOOZE;
dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO);
return FAILED;
}

/*
@ -2740,12 +2730,12 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
tmp = NEXT(tmp))
if (cmd == tmp) {
local_irq_restore(flags);
ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO);
dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO);

if (NCR5380_select (instance, cmd, (int) cmd->tag))
return SCSI_ABORT_BUSY;
return FAILED;

ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO);
dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO);

do_abort (instance);

@ -2769,7 +2759,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
#endif
local_irq_restore(flags);
tmp->scsi_done(tmp);
return SCSI_ABORT_SUCCESS;
return SUCCESS;
}
}

@ -2786,7 +2776,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
local_irq_restore(flags);
printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO);

return SCSI_ABORT_NOT_RUNNING;
return FAILED;
}


@ -2795,7 +2785,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
*
* Purpose : reset the SCSI bus.
*
* Returns : SCSI_RESET_WAKEUP
* Returns : SUCCESS or FAILURE
*
*/

@ -2804,7 +2794,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
SETUP_HOSTDATA(cmd->device->host);
int i;
unsigned long flags;
#if 1
#if defined(RESET_RUN_DONE)
struct scsi_cmnd *connected, *disconnected_queue;
#endif

@ -2826,8 +2816,15 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
* through anymore ... */
(void)NCR5380_read( RESET_PARITY_INTERRUPT_REG );

#if 1 /* XXX Should now be done by midlevel code, but it's broken XXX */
/* XXX see below XXX */
/* MSch 20140115 - looking at the generic NCR5380 driver, all of this
* should go.
* Catch-22: if we don't clear all queues, the SCSI driver lock will
* not be released by atari_scsi_reset()!
*/

#if defined(RESET_RUN_DONE)
/* XXX Should now be done by midlevel code, but it's broken XXX */
/* XXX see below XXX */

/* MSch: old-style reset: actually abort all command processing here */

@ -2857,7 +2854,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
*/

if ((cmd = connected)) {
ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd));
dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
cmd->scsi_done( cmd );
}
@ -2869,14 +2866,14 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
cmd->scsi_done( cmd );
}
if (i > 0)
ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i);
dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i);


/* since all commands have been explicitly terminated, we need to tell
* the midlevel code that the reset was SUCCESSFUL, and there is no
* need to 'wake up' the commands by a request_sense
*/
return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET;
return SUCCESS;
#else /* 1 */

/* MSch: new-style reset handling: let the mid-level do what it can */
@ -2903,11 +2900,11 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
*/

if (hostdata->issue_queue)
ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
if (hostdata->connected)
ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd));
dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
if (hostdata->disconnected_queue)
ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));
dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));

local_irq_save(flags);
hostdata->issue_queue = NULL;
@ -2924,7 +2921,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
local_irq_restore(flags);

/* we did no complete reset of all commands, so a wakeup is required */
return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET;
return SUCCESS;
#endif /* 1 */
}