// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/blk-mq-pci.h>
#include <linux/refcount.h>
#include <linux/crash_dump.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>

#include "qla_target.h"

/*
 * Driver version
 */
char qla2x00_version_str[40];

static int apidev_major;

/*
 * SRB allocation cache
 */
struct kmem_cache *srb_cachep;

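/*
 * Module parameters. Each module_param() below exposes the variable under
 * /sys/module/qla2xxx/parameters/ with the listed permission bits.
 */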
int ql2xfulldump_on_mpifail;
module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
		 "Set this to take full dump on MPI hang.");

int ql2xenforce_iocb_limit = 1;
module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xenforce_iocb_limit,
		 "Enforce IOCB throttling, to avoid FW congestion. (default: 1)");

/*
 * CT6 CTX allocation cache
 */
static struct kmem_cache *ctx_cachep;
/*
 * error level for logging
 */
uint ql_errlev = 0x8001;

int ql2xsecenable;
module_param(ql2xsecenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xsecenable,
	"Enable/disable security. 0(Default) - Security disabled. 1 - Security enabled.");

static int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
		"Specify if Class 2 operations are supported from the very "
		"beginning. Default is 0 - class 2 not supported.");

int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout,
		"Login timeout value in seconds.");

int qlport_down_retry;
module_param(qlport_down_retry, int, S_IRUGO);
MODULE_PARM_DESC(qlport_down_retry,
		"Maximum number of command retries to a port that returns "
		"a PORT-DOWN status.");

int ql2xplogiabsentdevice;
module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice,
		"Option to enable PLOGI to devices that are not present after "
		"a Fabric scan. This is needed for several broken switches. "
		"Default is 0 - no PLOGI. 1 - perform PLOGI.");

int ql2xloginretrycount;
module_param(ql2xloginretrycount, int, S_IRUGO);
MODULE_PARM_DESC(ql2xloginretrycount,
		"Specify an alternate value for the NVRAM login retry count.");

int ql2xallocfwdump = 1;
module_param(ql2xallocfwdump, int, S_IRUGO);
MODULE_PARM_DESC(ql2xallocfwdump,
		"Option to enable allocation of memory for a firmware dump "
		"during HBA initialization. Memory allocation requirements "
		"vary by ISP type. Default is 1 - allocate memory.");

int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
		"Option to enable extended error logging,\n"
		"\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n"
		"\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
		"\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n"
		"\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n"
		"\t\t0x00800000 - User space. 0x00400000 - Task Management.\n"
		"\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n"
		"\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
		"\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
		"\t\t0x00008000 - Verbose. 0x00004000 - Target.\n"
		"\t\t0x00002000 - Target Mgmt. 0x00001000 - Target TMF.\n"
		"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
		"\t\t0x1e400000 - Preferred value for capturing essential "
		"debug information (equivalent to old "
		"ql2xextended_error_logging=1).\n"
		"\t\tDo LOGICAL OR of the value to enable more than one level");

int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
MODULE_PARM_DESC(ql2xshiftctondsd,
		"Set to control shifting of command type processing "
		"based on total number of SG elements.");

int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable,
		"Enables FDMI registrations. "
		"0 - no FDMI registrations. "
		"1 - provide FDMI registrations (default).");

#define MAX_Q_DEPTH	64
static int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
		"Maximum queue depth to set for each LUN. "
		"Default is 64.");

int ql2xenabledif = 2;
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
		" Enable T10-CRC-DIF:\n"
		" Default is 2.\n"
		" 0 -- No DIF Support\n"
		" 1 -- Enable DIF for all types\n"
		" 2 -- Enable DIF for all types, except Type 0.\n");

#if (IS_ENABLED(CONFIG_NVME_FC))
int ql2xnvmeenable = 1;
#else
int ql2xnvmeenable;
#endif
module_param(ql2xnvmeenable, int, 0644);
MODULE_PARM_DESC(ql2xnvmeenable,
		"Enables NVME support. "
		"0 - no NVMe. Default is Y");

int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
		" Enable T10-CRC-DIF Error isolation by HBA:\n"
		" Default is 2.\n"
		" 0 -- Error isolation disabled\n"
		" 1 -- Error isolation enabled only for DIX Type 0\n"
		" 2 -- Error isolation enabled for all Types\n");

int ql2xiidmaenable = 1;
module_param(ql2xiidmaenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xiidmaenable,
		"Enables iIDMA settings "
		"Default is 1 - perform iIDMA. 0 - no iIDMA.");

int ql2xmqsupport = 1;
module_param(ql2xmqsupport, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmqsupport,
		"Enable on demand multiple queue pairs support "
		"Default is 1 for supported. "
		"Set it to 0 to turn off mq qpair support.");

int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
		"Option to specify location from which to load ISP firmware:.\n"
		" 2 -- load firmware via the request_firmware() (hotplug).\n"
		"      interface.\n"
		" 1 -- load firmware from flash.\n"
		" 0 -- use default semantics.\n");

int ql2xetsenable;
module_param(ql2xetsenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xetsenable,
		"Enables firmware ETS burst."
		"Default is 0 - skip ETS enablement.");

int ql2xdbwr = 1;
module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdbwr,
		"Option to specify scheme for request queue posting.\n"
		" 0 -- Regular doorbell.\n"
		" 1 -- CAMRAM doorbell (faster).\n");

int ql2xtargetreset = 1;
module_param(ql2xtargetreset, int, S_IRUGO);
MODULE_PARM_DESC(ql2xtargetreset,
		"Enable target reset."
		"Default is 1 - use hw defaults.");

int ql2xgffidenable;
module_param(ql2xgffidenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xgffidenable,
		"Enables GFF_ID checks of port type. "
		"Default is 0 - Do not use GFF_ID information.");

int ql2xasynctmfenable = 1;
module_param(ql2xasynctmfenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xasynctmfenable,
		"Enables issue of TM IOCBs asynchronously via IOCB mechanism"
		"Default is 1 - Issue TM IOCBs via mailbox mechanism.");

int ql2xdontresethba;
module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdontresethba,
		"Option to specify reset behaviour.\n"
		" 0 (Default) -- Reset on failure.\n"
		" 1 -- Do not reset on failure.\n");

uint64_t ql2xmaxlun = MAX_LUNS;
module_param(ql2xmaxlun, ullong, S_IRUGO);
MODULE_PARM_DESC(ql2xmaxlun,
		"Defines the maximum LU number to register with the SCSI "
		"midlayer. Default is 65535.");

int ql2xmdcapmask = 0x1F;
module_param(ql2xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdcapmask,
		"Set the Minidump driver capture mask level. "
		"Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");

int ql2xmdenable = 1;
module_param(ql2xmdenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdenable,
		"Enable/disable MiniDump. "
		"0 - MiniDump disabled. "
		"1 (Default) - MiniDump enabled.");

int ql2xexlogins;
module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xexlogins,
		 "Number of extended Logins. "
		 "0 (Default)- Disabled.");

int ql2xexchoffld = 1024;
module_param(ql2xexchoffld, uint, 0644);
MODULE_PARM_DESC(ql2xexchoffld,
	"Number of target exchanges.");

int ql2xiniexchg = 1024;
module_param(ql2xiniexchg, uint, 0644);
MODULE_PARM_DESC(ql2xiniexchg,
	"Number of initiator exchanges.");

int ql2xfwholdabts;
module_param(ql2xfwholdabts, int, S_IRUGO);
MODULE_PARM_DESC(ql2xfwholdabts,
		"Allow FW to hold status IOCB until ABTS rsp received. "
		"0 (Default) Do not set fw option. "
		"1 - Set fw option to hold ABTS.");

int ql2xmvasynctoatio = 1;
module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmvasynctoatio,
		"Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ"
		"0 (Default). Do not move IOCBs"
		"1 - Move IOCBs.");

int ql2xautodetectsfp = 1;
module_param(ql2xautodetectsfp, int, 0444);
MODULE_PARM_DESC(ql2xautodetectsfp,
		 "Detect SFP range and set appropriate distance.\n"
		 "1 (Default): Enable\n");

int ql2xenablemsix = 1;
module_param(ql2xenablemsix, int, 0444);
MODULE_PARM_DESC(ql2xenablemsix,
		 "Set to enable MSI or MSI-X interrupt mechanism.\n"
		 " Default is 1, enable MSI-X interrupt mechanism.\n"
		 " 0 -- enable traditional pin-based mechanism.\n"
		 " 1 -- enable MSI-X interrupt mechanism.\n"
		 " 2 -- enable MSI interrupt mechanism.\n");

int qla2xuseresexchforels;
module_param(qla2xuseresexchforels, int, 0444);
MODULE_PARM_DESC(qla2xuseresexchforels,
		 "Reserve 1/2 of emergency exchanges for ELS.\n"
		 " 0 (default): disabled");

static int ql2xprotmask;
module_param(ql2xprotmask, int, 0644);
MODULE_PARM_DESC(ql2xprotmask,
		 "Override DIF/DIX protection capabilities mask\n"
		 "Default is 0 which sets protection mask based on "
		 "capabilities reported by HBA firmware.\n");

static int ql2xprotguard;
module_param(ql2xprotguard, int, 0644);
MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n"
		 " 0 -- Let HBA firmware decide\n"
		 " 1 -- Force T10 CRC\n"
		 " 2 -- Force IP checksum\n");

int ql2xdifbundlinginternalbuffers;
module_param(ql2xdifbundlinginternalbuffers, int, 0644);
MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers,
	"Force using internal buffers for DIF information\n"
	"0 (Default). Based on check.\n"
	"1 Force using internal buffers\n");

int ql2xsmartsan;
module_param(ql2xsmartsan, int, 0444);
module_param_named(smartsan, ql2xsmartsan, int, 0444);
MODULE_PARM_DESC(ql2xsmartsan,
	"Send SmartSAN Management Attributes for FDMI Registration."
	" Default is 0 - No SmartSAN registration,"
	" 1 - Register SmartSAN Management Attributes.");

int ql2xrdpenable;
module_param(ql2xrdpenable, int, 0444);
module_param_named(rdpenable, ql2xrdpenable, int, 0444);
MODULE_PARM_DESC(ql2xrdpenable,
	"Enables RDP responses. "
	"0 - no RDP responses (default). "
	"1 - provide RDP responses.");
int ql2xabts_wait_nvme = 1;
module_param(ql2xabts_wait_nvme, int, 0444);
MODULE_PARM_DESC(ql2xabts_wait_nvme,
		 "To wait for ABTS response on I/O timeouts for NVMe. (default: 1)");

static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static int qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);

static struct scsi_transport_template *qla2xxx_transport_template = NULL;
struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;

/* TODO Convert to inlines
 *
 * Timer routines
 */

__inline__ void
qla2x00_start_timer(scsi_qla_host_t *vha, unsigned long interval)
{
	timer_setup(&vha->timer, qla2x00_timer, 0);
	vha->timer.expires = jiffies + interval * HZ;
	add_timer(&vha->timer);
	vha->timer_active = 1;
}

static inline void
qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
{
	/* Currently used for 82XX only. */
	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_dbg(ql_dbg_timer, vha, 0x600d,
		    "Device in a failed state, returning.\n");
		return;
	}

	mod_timer(&vha->timer, jiffies + interval * HZ);
}

static __inline__ void
qla2x00_stop_timer(scsi_qla_host_t *vha)
{
	del_timer_sync(&vha->timer);
	vha->timer_active = 0;
}

static int qla2x00_do_dpc(void *data);

static void qla2x00_rst_aen(scsi_qla_host_t *);

static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
	struct req_que **, struct rsp_que **);
static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
	struct qla_qpair *qpair);

/* -------------------------------------------------------------------------- */
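/*
 * qla_init_base_qpair - wire the base (default) queue pair to request/response
 * queue 0, the hardware lock and the MSI-X response-queue vector, and bind it
 * to the current CPU.
 */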
static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;

	rsp->qpair = ha->base_qpair;
	rsp->req = req;
	ha->base_qpair->hw = ha;
	ha->base_qpair->req = req;
	ha->base_qpair->rsp = rsp;
	ha->base_qpair->vha = vha;
	ha->base_qpair->qp_lock_ptr = &ha->hardware_lock;
	ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
	ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
	ha->base_qpair->srb_mempool = ha->srb_mempool;
	INIT_LIST_HEAD(&ha->base_qpair->hints_list);
	ha->base_qpair->enable_class_2 = ql2xenableclass2;
	/* init qpair to this cpu. Will adjust at run time. */
	qla_cpu_update(rsp->qpair, raw_smp_processor_id());
	ha->base_qpair->pdev = ha->pdev;

	if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
		ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
}

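/*
 * qla2x00_alloc_queues - allocate the request/response queue pointer maps,
 * the base queue pair and, when MQ or NVMe support is enabled, the queue-pair
 * map.  Slot 0 of each map records the supplied req/rsp so they can be freed
 * if a later probe step fails.  Returns 0 on success or -ENOMEM with all
 * earlier allocations unwound.
 */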
static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
				struct rsp_que *rsp)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *),
				GFP_KERNEL);
	if (!ha->req_q_map) {
		ql_log(ql_log_fatal, vha, 0x003b,
		    "Unable to allocate memory for request queue ptrs.\n");
		goto fail_req_map;
	}

	ha->rsp_q_map = kcalloc(ha->max_rsp_queues, sizeof(struct rsp_que *),
				GFP_KERNEL);
	if (!ha->rsp_q_map) {
		ql_log(ql_log_fatal, vha, 0x003c,
		    "Unable to allocate memory for response queue ptrs.\n");
		goto fail_rsp_map;
	}

	ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
	if (ha->base_qpair == NULL) {
		ql_log(ql_log_warn, vha, 0x00e0,
		    "Failed to allocate base queue pair memory.\n");
		goto fail_base_qpair;
	}

	qla_init_base_qpair(vha, req, rsp);

	if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) {
		ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
			GFP_KERNEL);
		if (!ha->queue_pair_map) {
			ql_log(ql_log_fatal, vha, 0x0180,
			    "Unable to allocate memory for queue pair ptrs.\n");
			goto fail_qpair_map;
		}
	}

	/*
	 * Make sure we record at least the request and response queue zero in
	 * case we need to free them if part of the probe fails.
	 */
	ha->rsp_q_map[0] = rsp;
	ha->req_q_map[0] = req;
	set_bit(0, ha->rsp_qid_map);
	set_bit(0, ha->req_qid_map);
	return 0;

fail_qpair_map:
	kfree(ha->base_qpair);
	ha->base_qpair = NULL;
fail_base_qpair:
	kfree(ha->rsp_q_map);
	ha->rsp_q_map = NULL;
fail_rsp_map:
	kfree(ha->req_q_map);
	ha->req_q_map = NULL;
fail_req_map:
	return -ENOMEM;
}

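/*
 * qla2x00_free_req_que - release a request queue: its DMA ring (regular or
 * ISPFX00 variant), the outstanding-command array and the structure itself.
 */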
static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
{
	if (IS_QLAFX00(ha)) {
		if (req && req->ring_fx00)
			dma_free_coherent(&ha->pdev->dev,
			    (req->length_fx00 + 1) * sizeof(request_t),
			    req->ring_fx00, req->dma_fx00);
	} else if (req && req->ring)
		dma_free_coherent(&ha->pdev->dev,
			(req->length + 1) * sizeof(request_t),
			req->ring, req->dma);

	if (req)
		kfree(req->outstanding_cmds);

	kfree(req);
}

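/*
 * qla2x00_free_rsp_que - release a response queue: its DMA ring (regular or
 * ISPFX00 variant) and the structure itself.
 */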
static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	if (IS_QLAFX00(ha)) {
		if (rsp && rsp->ring_fx00)
			dma_free_coherent(&ha->pdev->dev,
			    (rsp->length_fx00 + 1) * sizeof(request_t),
			    rsp->ring_fx00, rsp->dma_fx00);
	} else if (rsp && rsp->ring) {
		dma_free_coherent(&ha->pdev->dev,
			(rsp->length + 1) * sizeof(response_t),
			rsp->ring, rsp->dma);
	}
	kfree(rsp);
}

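/*
 * qla2x00_free_queues - tear down every request/response queue recorded in
 * the qid maps and then release the pointer maps themselves.  The hardware
 * lock is dropped around each individual free so the DMA rings are released
 * outside of the spinlock.
 */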
static void qla2x00_free_queues(struct qla_hw_data *ha)
{
	struct req_que *req;
	struct rsp_que *rsp;
	int cnt;
	unsigned long flags;

	if (ha->queue_pair_map) {
		kfree(ha->queue_pair_map);
		ha->queue_pair_map = NULL;
	}
	if (ha->base_qpair) {
		kfree(ha->base_qpair);
		ha->base_qpair = NULL;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
		if (!test_bit(cnt, ha->req_qid_map))
			continue;

		req = ha->req_q_map[cnt];
		clear_bit(cnt, ha->req_qid_map);
		ha->req_q_map[cnt] = NULL;

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		qla2x00_free_req_que(ha, req);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(ha->req_q_map);
	ha->req_q_map = NULL;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
		if (!test_bit(cnt, ha->rsp_qid_map))
			continue;

		rsp = ha->rsp_q_map[cnt];
		clear_bit(cnt, ha->rsp_qid_map);
		ha->rsp_q_map[cnt] = NULL;
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		qla2x00_free_rsp_que(ha, rsp);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(ha->rsp_q_map);
	ha->rsp_q_map = NULL;
}


static char *
qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
{
	struct qla_hw_data *ha = vha->hw;
	static const char *const pci_bus_modes[] = {
		"33", "66", "100", "133",
	};
	uint16_t pci_bus;

	pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
	if (pci_bus) {
		snprintf(str, str_len, "PCI-X (%s MHz)",
		    pci_bus_modes[pci_bus]);
	} else {
		pci_bus = (ha->pci_attr & BIT_8) >> 8;
		snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[pci_bus]);
	}

	return str;
}

static char *
qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
{
	static const char *const pci_bus_modes[] = {
		"33", "66", "100", "133",
	};
	struct qla_hw_data *ha = vha->hw;
	uint32_t pci_bus;

	if (pci_is_pcie(ha->pdev)) {
		uint32_t lstat, lspeed, lwidth;
		const char *speed_str;

		pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
		lspeed = lstat & PCI_EXP_LNKCAP_SLS;
		lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;

		switch (lspeed) {
		case 1:
			speed_str = "2.5GT/s";
			break;
		case 2:
			speed_str = "5.0GT/s";
			break;
		case 3:
			speed_str = "8.0GT/s";
			break;
		case 4:
			speed_str = "16.0GT/s";
			break;
		default:
			speed_str = "<unknown>";
			break;
		}
		snprintf(str, str_len, "PCIe (%s x%d)", speed_str, lwidth);

		return str;
	}

	pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
	if (pci_bus == 0 || pci_bus == 8)
		snprintf(str, str_len, "PCI (%s MHz)",
		    pci_bus_modes[pci_bus >> 3]);
	else
		snprintf(str, str_len, "PCI-X Mode %d (%s MHz)",
		    pci_bus & 4 ? 2 : 1,
		    pci_bus_modes[pci_bus & 3]);

	return str;
}

static char *
qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
	char un_str[10];
	struct qla_hw_data *ha = vha->hw;

	snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version,
	    ha->fw_minor_version, ha->fw_subminor_version);

	if (ha->fw_attributes & BIT_9) {
		strcat(str, "FLX");
		return (str);
	}

	switch (ha->fw_attributes & 0xFF) {
	case 0x7:
		strcat(str, "EF");
		break;
	case 0x17:
		strcat(str, "TP");
		break;
	case 0x37:
		strcat(str, "IP");
		break;
	case 0x77:
		strcat(str, "VI");
		break;
	default:
		sprintf(un_str, "(%x)", ha->fw_attributes);
		strcat(str, un_str);
		break;
	}
	if (ha->fw_attributes & 0x100)
		strcat(str, "X");

	return (str);
}

static char *
qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
	struct qla_hw_data *ha = vha->hw;

	snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version,
	    ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
	return str;
}
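
/*
 * Release the DMA mappings and CRC/DIF protection contexts held by a
 * SCSI srb once the command is done with the hardware.
 */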
void qla2x00_sp_free_dma(srb_t *sp)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	if (sp->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		sp->flags &= ~SRB_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
		/* List is assured to have elements */
		qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
		struct crc_context *ctx0 = sp->u.scmd.crc_ctx;

		dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
	}

	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
		struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;

		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
		    ctx1->fcp_cmnd_dma);
		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
		mempool_free(ctx1, ha->ctx_mempool);
	}
}
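
/*
 * Completion callback for SCSI srbs on the base queue: frees srb
 * resources, hands the command back to the midlayer via scsi_done, and
 * wakes any waiter (e.g. the abort handler) through sp->comp.
 */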
void qla2x00_sp_compl(srb_t *sp, int res)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct completion *comp = sp->comp;

	sp->free(sp);
	cmd->result = res;
	CMD_SP(cmd) = NULL;
	cmd->scsi_done(cmd);
	if (comp)
		complete(comp);
}
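
/*
 * Per-queue-pair variant of qla2x00_sp_free_dma(): additionally tears
 * down the DIF bundle DMA handle and descriptor lists used for
 * protection (T10-PI) I/O before the CRC context is returned to its pool.
 */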
void qla2xxx_qpair_sp_free_dma(srb_t *sp)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	if (sp->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		sp->flags &= ~SRB_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
		/* List is assured to have elements */
		qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
	}

	if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) {
		struct crc_context *difctx = sp->u.scmd.crc_ctx;
		struct dsd_dma *dif_dsd, *nxt_dsd;

		list_for_each_entry_safe(dif_dsd, nxt_dsd,
		    &difctx->ldif_dma_hndl_list, list) {
			list_del(&dif_dsd->list);
			dma_pool_free(ha->dif_bundl_pool, dif_dsd->dsd_addr,
			    dif_dsd->dsd_list_dma);
			kfree(dif_dsd);
			difctx->no_dif_bundl--;
		}

		list_for_each_entry_safe(dif_dsd, nxt_dsd,
		    &difctx->ldif_dsd_list, list) {
			list_del(&dif_dsd->list);
			dma_pool_free(ha->dl_dma_pool, dif_dsd->dsd_addr,
			    dif_dsd->dsd_list_dma);
			kfree(dif_dsd);
			difctx->no_ldif_dsd--;
		}

		if (difctx->no_ldif_dsd) {
			ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
			    "%s: difctx->no_ldif_dsd=%x\n",
			    __func__, difctx->no_ldif_dsd);
		}

		if (difctx->no_dif_bundl) {
			ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
			    "%s: difctx->no_dif_bundl=%x\n",
			    __func__, difctx->no_dif_bundl);
		}
		sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID;
	}

	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
		struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;

		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
		    ctx1->fcp_cmnd_dma);
		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
		mempool_free(ctx1, ha->ctx_mempool);
		sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
		struct crc_context *ctx0 = sp->u.scmd.crc_ctx;

		dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
	}
}
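
/*
 * Completion callback for srbs submitted on a dedicated queue pair;
 * mirrors qla2x00_sp_compl().
 */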
void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct completion *comp = sp->comp;

	sp->free(sp);
	cmd->result = res;
	CMD_SP(cmd) = NULL;
	cmd->scsi_done(cmd);
	if (comp)
		complete(comp);
}

[SCSI] update fc_transport for removal of block/unblock functions
We recently went back to implement a board reset. When we perform the reset, we
wanted to tear down the internal data structures and rebuild them. Unfortunately,
when it came to the rport structure, things were odd. If we deleted them, the scsi
targets and sdevs would be torn down. Not a good thing for a temporary reset. We
could block the rports, but then we either maintain the internal structures to keep
the rport reference (perhaps even replicating what's in the transport), or we have
to fatten the fc transport with new search routines to find the rport (and deal
with the case of a dangling rport that the driver forgets).
It dawned on me that we had actually reached this state incorrectly. When the fc
transport first started, we did the block/unblock first, then added the rport
interface. The purpose of block/unblock is to hide the temporary disappearance of
the rport (e.g. being deleted, then re-added). Why are we making the driver do the
block/unblock? We should be making the transport have only an rport add/delete, and
then let the transport handle the block/unblock.
So... This patch removes the existing fc_remote_port_block/unblock functions. It
moves the block/unblock functionality into the fc_remote_port_add/delete functions.
Updates for the lpfc driver are included. Qlogic driver updates are also enclosed,
thanks to the contributions of Andrew Vasquez. [Note: the qla2xxx changes are
relative to the scsi-misc-2.6 tree as of this morning - which does not include the
recent patches sent by Andrew]. The zfcp driver does not use the block/unblock
functions.
One last comment: The resulting behavior feels very clean. The LLDD is concerned
only with add/delete, which corresponds to the physical disappearance. However, the
fact that the scsi target and sdevs are not immediately torn down after the LLDD
calls delete causes an interesting scenario... the midlayer can call the
xxx_slave_alloc and xxx_queuecommand functions with an sdev that is at the location
the rport used to be. The driver must validate that the device exists when it first
enters these functions. In thinking about it, this has always been the case for the
LLDD and these routines. The existing drivers already check for existence. However,
this highlights that simple validation via data structure dereferencing needs to be
watched.
To deal with this, a new transport function, fc_remote_port_chkready(), was created
that LLDDs should call when they first enter these two routines. It validates the
rport state and returns a SCSI result which can be returned directly; the
queuecommand entry point below does exactly that. In addition to solving the above,
it also creates consistent behavior from the LLDDs while blocks and deletes are
occurring.
Rejections fixed up and
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>

static int
qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	srb_t *sp;
	int rval;

	if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) ||
	    WARN_ON_ONCE(!rport)) {
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	if (ha->mqenable) {
		uint32_t tag;
		uint16_t hwq;
		struct qla_qpair *qpair = NULL;

		tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
		hwq = blk_mq_unique_tag_to_hwq(tag);
		qpair = ha->queue_pair_map[hwq];

		if (qpair)
			return qla2xxx_mqueuecommand(host, cmd, qpair);
	}

	if (ha->flags.eeh_busy) {
		if (ha->flags.pci_channel_io_perm_failure) {
			ql_dbg(ql_dbg_aer, vha, 0x9010,
			    "PCI Channel IO permanent failure, exiting "
			    "cmd=%p.\n", cmd);
			cmd->result = DID_NO_CONNECT << 16;
		} else {
			ql_dbg(ql_dbg_aer, vha, 0x9011,
			    "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
			cmd->result = DID_REQUEUE << 16;
		}
		goto qc24_fail_command;
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		cmd->result = rval;
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
		    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
		    cmd, rval);
		goto qc24_fail_command;
	}

	if (!vha->flags.difdix_supported &&
	    scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
		ql_dbg(ql_dbg_io, vha, 0x3004,
		    "DIF Cap not reg, fail DIF capable cmd's:%p.\n",
		    cmd);
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	if (!fcport || fcport->deleted) {
		cmd->result = DID_IMM_RETRY << 16;
		goto qc24_fail_command;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
		    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
			ql_dbg(ql_dbg_io, vha, 0x3005,
			    "Returning DNC, fcport_state=%d loop_state=%d.\n",
			    atomic_read(&fcport->state),
			    atomic_read(&base_vha->loop_state));
			cmd->result = DID_NO_CONNECT << 16;
			goto qc24_fail_command;
		}
		goto qc24_target_busy;
	}

	/*
	 * Return target busy if we've received a non-zero retry_delay_timer
	 * in a FCP_RSP.
	 */
	if (fcport->retry_delay_timestamp == 0) {
		/* retry delay not set */
	} else if (time_after(jiffies, fcport->retry_delay_timestamp))
		fcport->retry_delay_timestamp = 0;
	else
		goto qc24_target_busy;

	sp = scsi_cmd_priv(cmd);
	qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport);

	sp->u.scmd.cmd = cmd;
	sp->type = SRB_SCSI_CMD;

	CMD_SP(cmd) = (void *)sp;
	sp->free = qla2x00_sp_free_dma;
	sp->done = qla2x00_sp_compl;

	rval = ha->isp_ops->start_scsi(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
		    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
		goto qc24_host_busy_free_sp;
	}

	return 0;

qc24_host_busy_free_sp:
	sp->free(sp);

qc24_target_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

qc24_fail_command:
	cmd->scsi_done(cmd);

	return 0;
}

/* For MQ supported I/O */
int
qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
    struct qla_qpair *qpair)
{
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	srb_t *sp;
	int rval;

	rval = rport ? fc_remote_port_chkready(rport) : (DID_NO_CONNECT << 16);
	if (rval) {
		cmd->result = rval;
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
		    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
		    cmd, rval);
		goto qc24_fail_command;
	}

	if (!qpair->online) {
		ql_dbg(ql_dbg_io, vha, 0x3077,
		    "qpair not online. eeh_busy=%d.\n", ha->flags.eeh_busy);
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	if (!fcport || fcport->deleted) {
		cmd->result = DID_IMM_RETRY << 16;
		goto qc24_fail_command;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
		    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
			ql_dbg(ql_dbg_io, vha, 0x3077,
			    "Returning DNC, fcport_state=%d loop_state=%d.\n",
			    atomic_read(&fcport->state),
			    atomic_read(&base_vha->loop_state));
			cmd->result = DID_NO_CONNECT << 16;
			goto qc24_fail_command;
		}
		goto qc24_target_busy;
	}

	/*
	 * Return target busy if we've received a non-zero retry_delay_timer
	 * in a FCP_RSP.
	 */
	if (fcport->retry_delay_timestamp == 0) {
		/* retry delay not set */
	} else if (time_after(jiffies, fcport->retry_delay_timestamp))
		fcport->retry_delay_timestamp = 0;
	else
		goto qc24_target_busy;

	sp = scsi_cmd_priv(cmd);
	qla2xxx_init_sp(sp, vha, qpair, fcport);

	sp->u.scmd.cmd = cmd;
	sp->type = SRB_SCSI_CMD;
	CMD_SP(cmd) = (void *)sp;
	sp->free = qla2xxx_qpair_sp_free_dma;
	sp->done = qla2xxx_qpair_sp_compl;

	rval = ha->isp_ops->start_scsi_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
		    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
		goto qc24_host_busy_free_sp;
	}

	return 0;

qc24_host_busy_free_sp:
	sp->free(sp);

qc24_target_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

qc24_fail_command:
	cmd->scsi_done(cmd);

	return 0;
}

/*
 * qla2x00_eh_wait_on_command
 *    Waits, up to a maximum time, for the command to be returned by the
 *    firmware.
 *
 * Input:
 *    cmd = Scsi Command to wait on.
 *
 * Return:
 *    Completed in time : QLA_SUCCESS
 *    Did not complete in time : QLA_FUNCTION_FAILED
 */
static int
qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
{
#define ABORT_POLLING_PERIOD	1000
#define ABORT_WAIT_ITER		((2 * 1000) / (ABORT_POLLING_PERIOD))
	unsigned long wait_iter = ABORT_WAIT_ITER;
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;
	int ret = QLA_SUCCESS;

	if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
		ql_dbg(ql_dbg_taskm, vha, 0x8005,
		    "Return:eh_wait.\n");
		return ret;
	}

	while (CMD_SP(cmd) && wait_iter--) {
		msleep(ABORT_POLLING_PERIOD);
	}
	if (CMD_SP(cmd))
		ret = QLA_FUNCTION_FAILED;

	return ret;
}

/*
 * qla2x00_wait_for_hba_online
 *    Wait till the HBA is online after going through
 *    <= MAX_RETRIES_OF_ISP_ABORT retries, or until the HBA is finally
 *    disabled, i.e. marked offline.
 *
 * Input:
 *     ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching-Release SPIN_LOCK
 *    (if any) before calling this routine.
 *
 * Return:
 *    Success (Adapter is online) : 0
 *    Failed  (Adapter is offline/disabled) : 1
 */
int
qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
{
	int return_status;
	unsigned long wait_online;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->dpc_active) && time_before(jiffies, wait_online)) {

		msleep(1000);
	}
	if (base_vha->flags.online)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	return (return_status);
}
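
/*
 * Wake-up condition for the fcport_waitQ wait in
 * qla2x00_wait_for_sess_deletion(): returns nonzero only when
 * fcport_count has dropped to zero and no session deletion is still in
 * flight.
 */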
static inline int test_fcport_count(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	int res;
	/* Return 0 = sleep, x = wake */

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ql_dbg(ql_dbg_init, vha, 0x00ec,
	    "tgt %p, fcport_count=%d\n",
	    vha, vha->fcport_count);
	res = (vha->fcport_count == 0);
	if (res) {
		struct fc_port *fcport;

		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->deleted != QLA_SESS_DELETED) {
				/* session(s) may not be fully logged in
				 * (i.e. fcport_count=0), but session
				 * deletion thread(s) may still be in
				 * flight.
				 */

				res = 0;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return res;
}

/*
 * qla2x00_wait_for_sess_deletion can only be called from remove_one;
 * it depends on the UNLOADING flag to stop device discovery.
 */
void
qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
{
	u8 i;

	qla2x00_mark_all_devices_lost(vha);

	for (i = 0; i < 10; i++) {
		if (wait_event_timeout(vha->fcport_waitQ,
		    test_fcport_count(vha), HZ) > 0)
			break;
	}

	flush_workqueue(vha->hw->wq);
}

/*
 * qla2x00_wait_for_hba_ready
 * Wait till the HBA is ready before doing driver unload
 *
 * Input:
 *     ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching-Release SPIN_LOCK
 *    (if any) before calling this routine.
 *
 */
static void
qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	while ((qla2x00_reset_active(vha) || ha->dpc_active ||
	    ha->flags.mbox_busy) ||
	    test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
	    test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
		if (test_bit(UNLOADING, &base_vha->dpc_flags))
			break;
		msleep(1000);
	}
}
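
/*
 * Poll until an in-progress ISP abort has finished and chip_reset_done
 * is set, or the loop timeout expires.  Returns QLA_SUCCESS if the chip
 * reset completed, QLA_FUNCTION_FAILED otherwise.
 */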
int
qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
{
	int return_status;
	unsigned long wait_reset;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->dpc_active) && time_before(jiffies, wait_reset)) {

		msleep(1000);

		if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
		    ha->flags.chip_reset_done)
			break;
	}
	if (ha->flags.chip_reset_done)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	return return_status;
}

/**************************************************************************
* qla2xxx_eh_abort
*
* Description:
*    The abort function will abort the specified command.
*
* Input:
*    cmd = Linux SCSI command packet to be aborted.
*
* Returns:
*    Either SUCCESS or FAILED.
*
* Note:
*    Only return FAILED if the command was not returned by the firmware.
**************************************************************************/
static int
qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	DECLARE_COMPLETION_ONSTACK(comp);
	srb_t *sp;
	int ret;
	unsigned int id;
	uint64_t lun;
	int rval;
	struct qla_hw_data *ha = vha->hw;
	uint32_t ratov_j;
	struct qla_qpair *qpair;
	unsigned long flags;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x8042,
		    "PCI/Register disconnect, exiting.\n");
		qla_pci_set_eeh_busy(vha);
		return FAILED;
	}

	ret = fc_block_scsi_eh(cmd);
	if (ret != 0)
		return ret;

	sp = scsi_cmd_priv(cmd);
	qpair = sp->qpair;

	vha->cmd_timeout_cnt++;

	if ((sp->fcport && sp->fcport->deleted) || !qpair)
		return SUCCESS;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	sp->comp = &comp;
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	id = cmd->device->id;
	lun = cmd->device->lun;

	ql_dbg(ql_dbg_taskm, vha, 0x8002,
	    "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
	    vha->host_no, id, lun, sp, cmd, sp->handle);

	/*
	 * Abort will release the original command/sp from the FW. Let the
	 * original command call scsi_done; in return it will wake up this
	 * sleeping thread.
	 */
	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_taskm, vha, 0x8003,
	    "Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);

	/* Wait for the command completion. */
	ratov_j = ha->r_a_tov/10 * 4 * 1000;
	ratov_j = msecs_to_jiffies(ratov_j);
	switch (rval) {
	case QLA_SUCCESS:
		if (!wait_for_completion_timeout(&comp, ratov_j)) {
			ql_dbg(ql_dbg_taskm, vha, 0xffff,
			    "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
			    __func__, ha->r_a_tov/10);
			ret = FAILED;
		} else {
			ret = SUCCESS;
		}
		break;
	default:
		ret = FAILED;
		break;
	}

	sp->comp = NULL;

	ql_log(ql_log_info, vha, 0x801c,
	    "Abort command issued nexus=%ld:%d:%llu -- %x.\n",
	    vha->host_no, id, lun, ret);

	return ret;
}

/*
 * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
int
qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
    uint64_t l, enum nexus_wait_type type)
{
	int cnt, match, status;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	srb_t *sp;
	struct scsi_cmnd *cmd;

	status = QLA_SUCCESS;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	req = vha->req;
	for (cnt = 1; status == QLA_SUCCESS &&
	    cnt < req->num_outstanding_cmds; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (!sp)
			continue;
		if (sp->type != SRB_SCSI_CMD)
			continue;
		if (vha->vp_idx != sp->vha->vp_idx)
			continue;
		match = 0;
		cmd = GET_CMD_SP(sp);
		switch (type) {
		case WAIT_HOST:
			match = 1;
			break;
		case WAIT_TARGET:
			match = cmd->device->id == t;
			break;
		case WAIT_LUN:
			match = (cmd->device->id == t &&
			    cmd->device->lun == l);
			break;
		}
		if (!match)
			continue;

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		status = qla2x00_eh_wait_on_command(cmd);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return status;
}

static char *reset_errors[] = {
	"HBA not online",
	"HBA not ready",
	"Task management failed",
	"Waiting for command completions",
};
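
/*
 * Device (LUN) reset handler for the SCSI error-handler path: waits for
 * the HBA to come online, issues a LUN reset via isp_ops->lun_reset(),
 * then waits for commands outstanding on that nexus to drain.
 */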
static int
qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	scsi_qla_host_t *vha = shost_priv(sdev->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
	struct qla_hw_data *ha = vha->hw;
	int err;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x803e,
		    "PCI/Register disconnect, exiting.\n");
		qla_pci_set_eeh_busy(vha);
		return FAILED;
	}

	if (!fcport) {
		return FAILED;
	}

	err = fc_block_rport(rport);
	if (err != 0)
		return err;

	if (fcport->deleted)
		return SUCCESS;

	ql_log(ql_log_info, vha, 0x8009,
	    "DEVICE RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", vha->host_no,
	    sdev->id, sdev->lun, cmd);

	err = 0;
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800a,
		    "Wait for hba online failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 2;
	if (ha->isp_ops->lun_reset(fcport, sdev->lun, 1)
	    != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800c,
		    "do_reset failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 3;
	if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
	    sdev->lun, WAIT_LUN) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800d,
		    "wait for pending cmds failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}

	ql_log(ql_log_info, vha, 0x800e,
	    "DEVICE RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n",
	    vha->host_no, sdev->id, sdev->lun, cmd);

	return SUCCESS;

eh_reset_failed:
	ql_log(ql_log_info, vha, 0x800f,
	    "DEVICE RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n",
	    reset_errors[err], vha->host_no, sdev->id, sdev->lun,
	    cmd);
	vha->reset_cmd_err_cnt++;
	return FAILED;
}
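
/*
 * Target reset handler for the SCSI error-handler path: same flow as the
 * device reset above, but uses isp_ops->target_reset() and drains every
 * command outstanding on the target.
 */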
static int
qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	scsi_qla_host_t *vha = shost_priv(rport_to_shost(rport));
	struct qla_hw_data *ha = vha->hw;
	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
	int err;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x803f,
		    "PCI/Register disconnect, exiting.\n");
		qla_pci_set_eeh_busy(vha);
		return FAILED;
	}

	if (!fcport) {
		return FAILED;
	}

	err = fc_block_rport(rport);
	if (err != 0)
		return err;

	if (fcport->deleted)
		return SUCCESS;

	ql_log(ql_log_info, vha, 0x8009,
	    "TARGET RESET ISSUED nexus=%ld:%d cmd=%p.\n", vha->host_no,
	    sdev->id, cmd);

	err = 0;
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800a,
		    "Wait for hba online failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 2;
	if (ha->isp_ops->target_reset(fcport, 0, 0) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800c,
		    "target_reset failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 3;
	if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
	    0, WAIT_TARGET) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800d,
		    "wait for pending cmds failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}

	ql_log(ql_log_info, vha, 0x800e,
	    "TARGET RESET SUCCEEDED nexus:%ld:%d cmd=%p.\n",
	    vha->host_no, sdev->id, cmd);

	return SUCCESS;

eh_reset_failed:
	ql_log(ql_log_info, vha, 0x800f,
	    "TARGET RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n",
	    reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
	    cmd);
	vha->reset_cmd_err_cnt++;
	return FAILED;
}

/**************************************************************************
* qla2xxx_eh_bus_reset
*
* Description:
*    The bus reset function will reset the bus and abort any executing
*    commands.
*
* Input:
*    cmd = Linux SCSI command packet of the command that caused the
*          bus reset.
*
* Returns:
*    SUCCESS/FAILURE (defined as macros in scsi.h).
*
**************************************************************************/
static int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	int ret = FAILED;
	unsigned int id;
	uint64_t lun;
	struct qla_hw_data *ha = vha->hw;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x8040,
		    "PCI/Register disconnect, exiting.\n");
		qla_pci_set_eeh_busy(vha);
		return FAILED;
	}

	id = cmd->device->id;
	lun = cmd->device->lun;

	if (qla2x00_chip_is_down(vha))
		return ret;

	ql_log(ql_log_info, vha, 0x8012,
	    "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_fatal, vha, 0x8013,
		    "Wait for hba online failed board disabled.\n");
		goto eh_bus_reset_done;
	}

	if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
		ret = SUCCESS;

	if (ret == FAILED)
		goto eh_bus_reset_done;

	/* Flush outstanding commands. */
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
	    QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8014,
		    "Wait for pending commands failed.\n");
		ret = FAILED;
	}

eh_bus_reset_done:
	ql_log(ql_log_warn, vha, 0x802b,
	    "BUS RESET %s nexus=%ld:%d:%llu.\n",
	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

	return ret;
}

/**************************************************************************
* qla2xxx_eh_host_reset
*
* Description:
*    The reset function will reset the Adapter.
*
* Input:
*      cmd = Linux SCSI command packet of the command that caused the
*            adapter reset.
*
* Returns:
*      Either SUCCESS or FAILED.
*
**************************************************************************/
static int
qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;
	int ret = FAILED;
	unsigned int id;
	uint64_t lun;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x8041,
		    "PCI/Register disconnect, exiting.\n");
		qla_pci_set_eeh_busy(vha);
		return SUCCESS;
	}

	id = cmd->device->id;
	lun = cmd->device->lun;

	ql_log(ql_log_info, vha, 0x8018,
	    "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);

	/*
	 * No point in issuing another reset if one is active.  Also do not
	 * attempt a reset if we are updating flash.
	 */
	if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)
		goto eh_host_reset_lock;

	if (vha != base_vha) {
		if (qla2x00_vp_abort_isp(vha))
			goto eh_host_reset_lock;
	} else {
		if (IS_P3P_TYPE(vha->hw)) {
			if (!qla82xx_fcoe_ctx_reset(vha)) {
				/* Ctx reset success */
				ret = SUCCESS;
				goto eh_host_reset_lock;
			}
			/* fall thru if ctx reset failed */
		}
		if (ha->wq)
			flush_workqueue(ha->wq);

		set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
		if (ha->isp_ops->abort_isp(base_vha)) {
			clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
			/* failed. schedule dpc to try */
			set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);

			if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x802a,
				    "wait for hba online failed.\n");
				goto eh_host_reset_lock;
			}
		}
		clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	}

	/* Waiting for command to be returned to OS. */
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
	    QLA_SUCCESS)
		ret = SUCCESS;

eh_host_reset_lock:
	ql_log(ql_log_info, vha, 0x8017,
	    "ADAPTER RESET %s nexus=%ld:%d:%llu.\n",
	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

	return ret;
}

/*
* qla2x00_loop_reset
*      Issue loop reset.
*
* Input:
*      ha = adapter block pointer.
*
* Returns:
*      0 = success
*/
int
qla2x00_loop_reset(scsi_qla_host_t *vha)
{
	int ret;
	struct fc_port *fcport;
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLAFX00(ha)) {
		return qlafx00_loop_reset(vha);
	}

	if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->port_type != FCT_TARGET)
				continue;

			ret = ha->isp_ops->target_reset(fcport, 0, 0);
			if (ret != QLA_SUCCESS) {
				ql_dbg(ql_dbg_taskm, vha, 0x802c,
				    "Bus Reset failed: Reset=%d "
				    "d_id=%x.\n", ret, fcport->d_id.b24);
			}
		}
	}

	if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
		qla2x00_mark_all_devices_lost(vha);
		ret = qla2x00_full_login_lip(vha);
		if (ret != QLA_SUCCESS) {
			ql_dbg(ql_dbg_taskm, vha, 0x802d,
			    "full_login_lip=%d.\n", ret);
		}
	}

	if (ha->flags.enable_lip_reset) {
		ret = qla2x00_lip_reset(vha);
		if (ret != QLA_SUCCESS)
			ql_dbg(ql_dbg_taskm, vha, 0x802e,
			    "lip_reset failed (%d).\n", ret);
	}

	/* Issue marker command only when we are going to start the I/O */
	vha->marker_needed = 1;

	return QLA_SUCCESS;
}
/*
|
|
|
|
* The caller must ensure that no completion interrupts will happen
|
|
|
|
* while this function is in progress.
|
|
|
|
*/
|
2018-11-29 18:25:11 +00:00
|
|
|
static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
|
|
|
|
unsigned long *flags)
|
|
|
|
__releases(qp->qp_lock_ptr)
|
|
|
|
__acquires(qp->qp_lock_ptr)
|
|
|
|
{
|
2019-04-17 21:44:35 +00:00
|
|
|
DECLARE_COMPLETION_ONSTACK(comp);
|
2018-11-29 18:25:11 +00:00
|
|
|
scsi_qla_host_t *vha = qp->vha;
|
|
|
|
struct qla_hw_data *ha = vha->hw;
|
2020-02-20 04:34:37 +00:00
|
|
|
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
|
2019-04-17 21:44:35 +00:00
|
|
|
int rval;
|
2019-11-05 15:06:54 +00:00
|
|
|
bool ret_cmd;
|
|
|
|
uint32_t ratov_j;
|
2018-11-29 18:25:11 +00:00
|
|
|
|
2020-01-23 04:23:40 +00:00
|
|
|
lockdep_assert_held(qp->qp_lock_ptr);
|
|
|
|
|
2019-11-05 15:06:54 +00:00
|
|
|
if (qla2x00_chip_is_down(vha)) {
|
|
|
|
sp->done(sp, res);
|
2019-04-17 21:44:35 +00:00
|
|
|
return;
|
2019-11-05 15:06:54 +00:00
|
|
|
}
|
2019-04-17 21:44:35 +00:00
|
|
|
|
|
|
|
if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS ||
|
|
|
|
(sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy &&
|
|
|
|
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
|
|
|
|
!qla2x00_isp_reg_stat(ha))) {
|
2019-11-05 15:06:54 +00:00
|
|
|
if (sp->comp) {
|
|
|
|
sp->done(sp, res);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-04-17 21:44:35 +00:00
|
|
|
sp->comp = ∁
|
|
|
|
spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
|
|
|
|
|
2019-11-05 15:06:54 +00:00
|
|
|
rval = ha->isp_ops->abort_command(sp);
|
|
|
|
/* Wait for command completion. */
|
|
|
|
ret_cmd = false;
|
|
|
|
ratov_j = ha->r_a_tov/10 * 4 * 1000;
|
|
|
|
ratov_j = msecs_to_jiffies(ratov_j);
|
2019-04-17 21:44:35 +00:00
|
|
|
switch (rval) {
|
|
|
|
case QLA_SUCCESS:
|
2019-11-05 15:06:54 +00:00
|
|
|
if (wait_for_completion_timeout(&comp, ratov_j)) {
|
|
|
|
ql_dbg(ql_dbg_taskm, vha, 0xffff,
|
|
|
|
"%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
|
|
|
|
__func__, ha->r_a_tov/10);
|
|
|
|
ret_cmd = true;
|
|
|
|
}
|
|
|
|
/* else FW return SP to driver */
|
2019-04-17 21:44:35 +00:00
|
|
|
break;
|
2019-11-05 15:06:54 +00:00
|
|
|
default:
|
|
|
|
ret_cmd = true;
|
2019-04-17 21:44:35 +00:00
|
|
|
break;
|
2018-11-29 18:25:11 +00:00
|
|
|
}
|
2019-04-17 21:44:35 +00:00
|
|
|
|
|
|
|
spin_lock_irqsave(qp->qp_lock_ptr, *flags);
|
2021-08-09 23:03:41 +00:00
|
|
|
if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
|
2019-11-05 15:06:54 +00:00
|
|
|
sp->done(sp, res);
|
|
|
|
} else {
|
|
|
|
sp->done(sp, res);
|
2018-11-29 18:25:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
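
/*
 * __qla2x00_abort_all_cmds() - flush every outstanding command on one queue
 * pair: initiator SRBs are aborted through qla2x00_abort_srb(), target-mode
 * commands are only flagged as aborted, task management entries are skipped,
 * and each handled slot in outstanding_cmds[] is cleared.
 */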
/*
 * The caller must ensure that no completion interrupts will happen
 * while this function is in progress.
 */
static void
__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{
	int cnt;
	unsigned long flags;
	srb_t *sp;
	scsi_qla_host_t *vha = qp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_cmd *cmd;

	if (!ha->req_q_map)
		return;
	spin_lock_irqsave(qp->qp_lock_ptr, flags);
	req = qp->req;
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (sp) {
			switch (sp->cmd_type) {
			case TYPE_SRB:
				qla2x00_abort_srb(qp, sp, res, &flags);
				break;
			case TYPE_TGT_CMD:
				if (!vha->hw->tgt.tgt_ops || !tgt ||
				    qla_ini_mode_enabled(vha)) {
					ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
					    "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
					    vha->dpc_flags);
					continue;
				}
				cmd = (struct qla_tgt_cmd *)sp;
				cmd->aborted = 1;
				break;
			case TYPE_TGT_TMCMD:
				/* Skip task management functions. */
				break;
			default:
				break;
			}
			req->outstanding_cmds[cnt] = NULL;
		}
	}
	spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
}

/*
 * The caller must ensure that no completion interrupts will happen
 * while this function is in progress.
 */
void
qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
{
	int que;
	struct qla_hw_data *ha = vha->hw;

	/* Continue only if initialization complete. */
	if (!ha->base_qpair)
		return;
	__qla2x00_abort_all_cmds(ha->base_qpair, res);

	if (!ha->queue_pair_map)
		return;
	for (que = 0; que < ha->max_qpairs; que++) {
		if (!ha->queue_pair_map[que])
			continue;

		__qla2x00_abort_all_cmds(ha->queue_pair_map[que], res);
	}
}
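
/*
 * The midlayer can call slave_alloc/queuecommand with an sdev that sits where
 * a deleted rport used to be, so validate the remote port with
 * fc_remote_port_chkready() before caching the fc_port in sdev->hostdata.
 */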
static int
qla2xxx_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = *(fc_port_t **)rport->dd_data;

	return 0;
}
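
/*
 * On T10 PI capable HBAs, restrict DMA buffer alignment to 8 bytes (needed
 * when protection information is in use), then set the initial queue depth
 * from the request queue defaults.
 */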
static int
qla2xxx_slave_configure(struct scsi_device *sdev)
{
	scsi_qla_host_t *vha = shost_priv(sdev->host);
	struct req_que *req = vha->req;

	if (IS_T10_PI_CAPABLE(vha->hw))
		blk_queue_update_dma_alignment(sdev->request_queue, 0x7);

	scsi_change_queue_depth(sdev, req->max_q_depth);
	return 0;
}

static void
qla2xxx_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
}

/**
 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
 * @ha: HA context
 *
 * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
 * supported addressing method.
 */
static void
qla2x00_config_dma_addressing(struct qla_hw_data *ha)
{
	/* Assume a 32bit DMA mask. */
	ha->flags.enable_64bit_addressing = 0;

	if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
		/* Any upper-dword bits set? */
		if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
		    !dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
			/* Ok, a 64bit DMA mask is applicable. */
			ha->flags.enable_64bit_addressing = 1;
			ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
			ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
			return;
		}
	}

	dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
	dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
}
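
/*
 * Interrupt control helpers.  Each write to the ictrl register is followed
 * by a read-back of the same register so the posted PCI write is flushed
 * before the hardware_lock is released.
 */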
static void
qla2x00_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	/* enable risc and host interrupts */
	wrt_reg_word(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
	rd_reg_word(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla2x00_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	/* disable risc and host interrupts */
	wrt_reg_word(&reg->ictrl, 0);
	rd_reg_word(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla24xx_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	wrt_reg_dword(&reg->ictrl, ICRX_EN_RISC_INT);
	rd_reg_dword(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla24xx_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (IS_NOPOLLING_TYPE(ha))
		return;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	wrt_reg_dword(&reg->ictrl, 0);
	rd_reg_dword(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
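
/*
 * qla2x00_iospace_config() - reserve the PCI regions, map the MMIO window in
 * BAR 1 (and, on MQ-capable ISP25xx/ISP81xx parts, the MQ window in BAR 3),
 * and derive msix_count, max_req_queues, max_rsp_queues and max_qpairs from
 * the MSI-X capability.
 */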
static int
qla2x00_iospace_config(struct qla_hw_data *ha)
{
	resource_size_t pio;
	uint16_t msix;

	if (pci_request_selected_regions(ha->pdev, ha->bars,
	    QLA2XXX_DRIVER_NAME)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (!(ha->bars & 1))
		goto skip_pio;

	/* We only need PIO for Flash operations on ISP2312 v2 chips. */
	pio = pci_resource_start(ha->pdev, 0);
	if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
		if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
			ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
			    "Invalid pci I/O region size (%s).\n",
			    pci_name(ha->pdev));
			pio = 0;
		}
	} else {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
		    "Region #0 not a PIO resource (%s).\n",
		    pci_name(ha->pdev));
		pio = 0;
	}
	ha->pio_address = pio;
	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
	    "PIO address=%llu.\n",
	    (unsigned long long)ha->pio_address);

skip_pio:
	/* Use MMIO operations for all accesses. */
	if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
		    "Region #1 not an MMIO resource (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
		    "Invalid PCI mem region size (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
	if (!ha->iobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
		    "Cannot remap MMIO (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Determine queue resources */
	ha->max_req_queues = ha->max_rsp_queues = 1;
	ha->msix_count = QLA_BASE_VECTORS;

	/* Check if FW supports MQ or not */
	if (!(ha->fw_attributes & BIT_6))
		goto mqiobase_exit;

	if (!ql2xmqsupport || !ql2xnvmeenable ||
	    (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
		goto mqiobase_exit;

	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
	    pci_resource_len(ha->pdev, 3));
	if (ha->mqiobase) {
		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
		    "MQIO Base=%p.\n", ha->mqiobase);
		/* Read MSIX vector size of the board */
		pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
		ha->msix_count = msix + 1;
		/* Max queues are bounded by available msix vectors */
		/* MB interrupt uses 1 vector */
		ha->max_req_queues = ha->msix_count - 1;
		ha->max_rsp_queues = ha->max_req_queues;
		/* Queue pairs is the max value minus the base queue pair */
		ha->max_qpairs = ha->max_rsp_queues - 1;
		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188,
		    "Max no of queue pairs: %d.\n", ha->max_qpairs);

		ql_log_pci(ql_log_info, ha->pdev, 0x001a,
		    "MSI-X vector count: %d.\n", ha->msix_count);
	} else
		ql_log_pci(ql_log_info, ha->pdev, 0x001b,
		    "BAR 3 not enabled.\n");

mqiobase_exit:
	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
	    "MSIX Count: %d.\n", ha->msix_count);
	return (0);

iospace_error_exit:
	return (-ENOMEM);
}
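
/*
 * qla83xx_iospace_config() - ISP83xx-style BAR layout: region 0 holds the
 * MMIO register window, region 4 the MQ window, and region 2 is mapped as
 * ha->msixbase and must be present before the MSI-X vector count is read
 * from PCI config space.  Queue-pair limits follow from that vector count,
 * with one extra vector reserved for the ATIO queue in target mode.
 */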
static int
qla83xx_iospace_config(struct qla_hw_data *ha)
{
	uint16_t msix;

	if (pci_request_selected_regions(ha->pdev, ha->bars,
	    QLA2XXX_DRIVER_NAME)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0117,
		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
		    pci_name(ha->pdev));

		goto iospace_error_exit;
	}

	/* Use MMIO operations for all accesses. */
	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0118,
		    "Invalid pci I/O region size (%s).\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0119,
		    "Invalid PCI mem region size (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN);
	if (!ha->iobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x011a,
		    "Cannot remap MMIO (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* 64bit PCI BAR - BAR2 will correspond to region 4 */
	/* 83XX 26XX always use MQ type access for queues
	 * - mbar 2, a.k.a region 4 */
	ha->max_req_queues = ha->max_rsp_queues = 1;
	ha->msix_count = QLA_BASE_VECTORS;
	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
	    pci_resource_len(ha->pdev, 4));

	if (!ha->mqiobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x011d,
		    "BAR2/region4 not enabled\n");
		goto mqiobase_exit;
	}

	ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2),
	    pci_resource_len(ha->pdev, 2));
	if (ha->msixbase) {
		/* Read MSIX vector size of the board */
		pci_read_config_word(ha->pdev,
		    QLA_83XX_PCI_MSIX_CONTROL, &msix);
		ha->msix_count = (msix & PCI_MSIX_FLAGS_QSIZE) + 1;
		/*
		 * By default, driver uses at least two msix vectors
		 * (default & rspq)
		 */
		if (ql2xmqsupport || ql2xnvmeenable) {
			/* MB interrupt uses 1 vector */
			ha->max_req_queues = ha->msix_count - 1;

			/* ATIOQ needs 1 vector. That's 1 less QPair */
			if (QLA_TGT_MODE_ENABLED())
				ha->max_req_queues--;

			ha->max_rsp_queues = ha->max_req_queues;

			/* Queue pairs is the max value minus
			 * the base queue pair */
			ha->max_qpairs = ha->max_req_queues - 1;
			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x00e3,
			    "Max no of queue pairs: %d.\n", ha->max_qpairs);
		}
		ql_log_pci(ql_log_info, ha->pdev, 0x011c,
		    "MSI-X vector count: %d.\n", ha->msix_count);
	} else
		ql_log_pci(ql_log_info, ha->pdev, 0x011e,
		    "BAR 1 not enabled.\n");

mqiobase_exit:
	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
	    "MSIX Count: %d.\n", ha->msix_count);
	return 0;

iospace_error_exit:
	return -ENOMEM;
}
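
/*
 * Per-ISP-family operation tables.  Each struct isp_operations below wires a
 * chip generation to its PCI setup, reset, firmware load, interrupt, flash
 * and I/O start routines; the PCI probe code selects the matching table for
 * the detected device ID.
 */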
static struct isp_operations qla2100_isp_ops = {
|
|
|
|
.pci_config = qla2100_pci_config,
|
|
|
|
.reset_chip = qla2x00_reset_chip,
|
|
|
|
.chip_diag = qla2x00_chip_diag,
|
|
|
|
.config_rings = qla2x00_config_rings,
|
|
|
|
.reset_adapter = qla2x00_reset_adapter,
|
|
|
|
.nvram_config = qla2x00_nvram_config,
|
|
|
|
.update_fw_options = qla2x00_update_fw_options,
|
|
|
|
.load_risc = qla2x00_load_risc,
|
|
|
|
.pci_info_str = qla2x00_pci_info_str,
|
|
|
|
.fw_version_str = qla2x00_fw_version_str,
|
|
|
|
.intr_handler = qla2100_intr_handler,
|
|
|
|
.enable_intrs = qla2x00_enable_intrs,
|
|
|
|
.disable_intrs = qla2x00_disable_intrs,
|
|
|
|
.abort_command = qla2x00_abort_command,
|
2008-04-03 20:13:24 +00:00
|
|
|
.target_reset = qla2x00_abort_target,
|
|
|
|
.lun_reset = qla2x00_lun_reset,
|
2007-07-19 22:06:00 +00:00
|
|
|
.fabric_login = qla2x00_login_fabric,
|
|
|
|
.fabric_logout = qla2x00_fabric_logout,
|
|
|
|
.calc_req_entries = qla2x00_calc_iocbs_32,
|
|
|
|
.build_iocbs = qla2x00_build_scsi_iocbs_32,
|
|
|
|
.prep_ms_iocb = qla2x00_prep_ms_iocb,
|
|
|
|
.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
|
|
|
|
.read_nvram = qla2x00_read_nvram_data,
|
|
|
|
.write_nvram = qla2x00_write_nvram_data,
|
|
|
|
.fw_dump = qla2100_fw_dump,
|
|
|
|
.beacon_on = NULL,
|
|
|
|
.beacon_off = NULL,
|
|
|
|
.beacon_blink = NULL,
|
|
|
|
.read_optrom = qla2x00_read_optrom_data,
|
|
|
|
.write_optrom = qla2x00_write_optrom_data,
|
|
|
|
.get_flash_version = qla2x00_get_flash_version,
|
2008-11-06 18:40:51 +00:00
|
|
|
.start_scsi = qla2x00_start_scsi,
|
2016-12-12 22:40:07 +00:00
|
|
|
.start_scsi_mq = NULL,
|
2010-04-13 00:59:55 +00:00
|
|
|
.abort_isp = qla2x00_abort_isp,
|
2011-11-18 17:03:16 +00:00
|
|
|
.iospace_config = qla2x00_iospace_config,
|
2013-03-28 12:21:23 +00:00
|
|
|
.initialize_adapter = qla2x00_initialize_adapter,
|
2007-07-19 22:06:00 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
static struct isp_operations qla2300_isp_ops = {
|
|
|
|
.pci_config = qla2300_pci_config,
|
|
|
|
.reset_chip = qla2x00_reset_chip,
|
|
|
|
.chip_diag = qla2x00_chip_diag,
|
|
|
|
.config_rings = qla2x00_config_rings,
|
|
|
|
.reset_adapter = qla2x00_reset_adapter,
|
|
|
|
.nvram_config = qla2x00_nvram_config,
|
|
|
|
.update_fw_options = qla2x00_update_fw_options,
|
|
|
|
.load_risc = qla2x00_load_risc,
|
|
|
|
.pci_info_str = qla2x00_pci_info_str,
|
|
|
|
.fw_version_str = qla2x00_fw_version_str,
|
|
|
|
.intr_handler = qla2300_intr_handler,
|
|
|
|
.enable_intrs = qla2x00_enable_intrs,
|
|
|
|
.disable_intrs = qla2x00_disable_intrs,
|
|
|
|
.abort_command = qla2x00_abort_command,
|
2008-04-03 20:13:24 +00:00
|
|
|
.target_reset = qla2x00_abort_target,
|
|
|
|
.lun_reset = qla2x00_lun_reset,
|
2007-07-19 22:06:00 +00:00
|
|
|
.fabric_login = qla2x00_login_fabric,
|
|
|
|
.fabric_logout = qla2x00_fabric_logout,
|
|
|
|
.calc_req_entries = qla2x00_calc_iocbs_32,
|
|
|
|
.build_iocbs = qla2x00_build_scsi_iocbs_32,
|
|
|
|
.prep_ms_iocb = qla2x00_prep_ms_iocb,
|
|
|
|
.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
|
|
|
|
.read_nvram = qla2x00_read_nvram_data,
|
|
|
|
.write_nvram = qla2x00_write_nvram_data,
|
|
|
|
.fw_dump = qla2300_fw_dump,
|
|
|
|
.beacon_on = qla2x00_beacon_on,
|
|
|
|
.beacon_off = qla2x00_beacon_off,
|
|
|
|
.beacon_blink = qla2x00_beacon_blink,
|
|
|
|
.read_optrom = qla2x00_read_optrom_data,
|
|
|
|
.write_optrom = qla2x00_write_optrom_data,
|
|
|
|
.get_flash_version = qla2x00_get_flash_version,
|
2008-11-06 18:40:51 +00:00
|
|
|
.start_scsi = qla2x00_start_scsi,
|
2016-12-12 22:40:07 +00:00
|
|
|
.start_scsi_mq = NULL,
|
2010-04-13 00:59:55 +00:00
|
|
|
.abort_isp = qla2x00_abort_isp,
|
2013-08-27 05:37:28 +00:00
|
|
|
.iospace_config = qla2x00_iospace_config,
|
2013-03-28 12:21:23 +00:00
|
|
|
.initialize_adapter = qla2x00_initialize_adapter,
|
2007-07-19 22:06:00 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
static struct isp_operations qla24xx_isp_ops = {
|
|
|
|
.pci_config = qla24xx_pci_config,
|
|
|
|
.reset_chip = qla24xx_reset_chip,
|
|
|
|
.chip_diag = qla24xx_chip_diag,
|
|
|
|
.config_rings = qla24xx_config_rings,
|
|
|
|
.reset_adapter = qla24xx_reset_adapter,
|
|
|
|
.nvram_config = qla24xx_nvram_config,
|
|
|
|
.update_fw_options = qla24xx_update_fw_options,
|
|
|
|
.load_risc = qla24xx_load_risc,
|
|
|
|
.pci_info_str = qla24xx_pci_info_str,
|
|
|
|
.fw_version_str = qla24xx_fw_version_str,
|
|
|
|
.intr_handler = qla24xx_intr_handler,
|
|
|
|
.enable_intrs = qla24xx_enable_intrs,
|
|
|
|
.disable_intrs = qla24xx_disable_intrs,
|
|
|
|
.abort_command = qla24xx_abort_command,
|
2008-04-03 20:13:24 +00:00
|
|
|
.target_reset = qla24xx_abort_target,
|
|
|
|
.lun_reset = qla24xx_lun_reset,
|
2007-07-19 22:06:00 +00:00
|
|
|
.fabric_login = qla24xx_login_fabric,
|
|
|
|
.fabric_logout = qla24xx_fabric_logout,
|
|
|
|
.calc_req_entries = NULL,
|
|
|
|
.build_iocbs = NULL,
|
|
|
|
.prep_ms_iocb = qla24xx_prep_ms_iocb,
|
|
|
|
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
|
|
|
|
.read_nvram = qla24xx_read_nvram_data,
|
|
|
|
.write_nvram = qla24xx_write_nvram_data,
|
|
|
|
.fw_dump = qla24xx_fw_dump,
|
|
|
|
.beacon_on = qla24xx_beacon_on,
|
|
|
|
.beacon_off = qla24xx_beacon_off,
|
|
|
|
.beacon_blink = qla24xx_beacon_blink,
|
|
|
|
.read_optrom = qla24xx_read_optrom_data,
|
|
|
|
.write_optrom = qla24xx_write_optrom_data,
|
|
|
|
.get_flash_version = qla24xx_get_flash_version,
|
2008-11-06 18:40:51 +00:00
|
|
|
.start_scsi = qla24xx_start_scsi,
|
2016-12-12 22:40:07 +00:00
|
|
|
.start_scsi_mq = NULL,
|
2010-04-13 00:59:55 +00:00
|
|
|
.abort_isp = qla2x00_abort_isp,
|
2013-08-27 05:37:28 +00:00
|
|
|
.iospace_config = qla2x00_iospace_config,
|
2013-03-28 12:21:23 +00:00
|
|
|
.initialize_adapter = qla2x00_initialize_adapter,
|
2007-07-19 22:06:00 +00:00
|
|
|
};
|
|
|
|
|
2007-07-20 03:37:34 +00:00
|
|
|
static struct isp_operations qla25xx_isp_ops = {
|
|
|
|
.pci_config = qla25xx_pci_config,
|
|
|
|
.reset_chip = qla24xx_reset_chip,
|
|
|
|
.chip_diag = qla24xx_chip_diag,
|
|
|
|
.config_rings = qla24xx_config_rings,
|
|
|
|
.reset_adapter = qla24xx_reset_adapter,
|
|
|
|
.nvram_config = qla24xx_nvram_config,
|
|
|
|
.update_fw_options = qla24xx_update_fw_options,
|
|
|
|
.load_risc = qla24xx_load_risc,
|
|
|
|
.pci_info_str = qla24xx_pci_info_str,
|
|
|
|
.fw_version_str = qla24xx_fw_version_str,
|
|
|
|
.intr_handler = qla24xx_intr_handler,
|
|
|
|
.enable_intrs = qla24xx_enable_intrs,
|
|
|
|
.disable_intrs = qla24xx_disable_intrs,
|
|
|
|
.abort_command = qla24xx_abort_command,
|
2008-04-03 20:13:24 +00:00
|
|
|
.target_reset = qla24xx_abort_target,
|
|
|
|
.lun_reset = qla24xx_lun_reset,
|
2007-07-20 03:37:34 +00:00
|
|
|
.fabric_login = qla24xx_login_fabric,
|
|
|
|
.fabric_logout = qla24xx_fabric_logout,
|
|
|
|
.calc_req_entries = NULL,
|
|
|
|
.build_iocbs = NULL,
|
|
|
|
.prep_ms_iocb = qla24xx_prep_ms_iocb,
|
|
|
|
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
|
|
|
|
.read_nvram = qla25xx_read_nvram_data,
|
|
|
|
.write_nvram = qla25xx_write_nvram_data,
|
|
|
|
.fw_dump = qla25xx_fw_dump,
|
|
|
|
.beacon_on = qla24xx_beacon_on,
|
|
|
|
.beacon_off = qla24xx_beacon_off,
|
|
|
|
.beacon_blink = qla24xx_beacon_blink,
|
2007-09-20 21:07:33 +00:00
|
|
|
.read_optrom = qla25xx_read_optrom_data,
|
2007-07-20 03:37:34 +00:00
|
|
|
.write_optrom = qla24xx_write_optrom_data,
|
|
|
|
.get_flash_version = qla24xx_get_flash_version,
|
2010-05-04 22:01:30 +00:00
|
|
|
.start_scsi = qla24xx_dif_start_scsi,
|
2016-12-12 22:40:07 +00:00
|
|
|
.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
|
2010-04-13 00:59:55 +00:00
|
|
|
.abort_isp = qla2x00_abort_isp,
|
2013-08-27 05:37:28 +00:00
|
|
|
.iospace_config = qla2x00_iospace_config,
|
2013-03-28 12:21:23 +00:00
|
|
|
.initialize_adapter = qla2x00_initialize_adapter,
|
2007-07-20 03:37:34 +00:00
|
|
|
};
|
|
|
|
|
2009-01-05 19:18:11 +00:00
|
|
|
static struct isp_operations qla81xx_isp_ops = {
|
|
|
|
.pci_config = qla25xx_pci_config,
|
|
|
|
.reset_chip = qla24xx_reset_chip,
|
|
|
|
.chip_diag = qla24xx_chip_diag,
|
|
|
|
.config_rings = qla24xx_config_rings,
|
|
|
|
.reset_adapter = qla24xx_reset_adapter,
|
|
|
|
.nvram_config = qla81xx_nvram_config,
|
2020-02-26 22:40:07 +00:00
|
|
|
.update_fw_options = qla24xx_update_fw_options,
|
2009-01-22 17:45:32 +00:00
|
|
|
.load_risc = qla81xx_load_risc,
|
2009-01-05 19:18:11 +00:00
|
|
|
.pci_info_str = qla24xx_pci_info_str,
|
|
|
|
.fw_version_str = qla24xx_fw_version_str,
|
|
|
|
.intr_handler = qla24xx_intr_handler,
|
|
|
|
.enable_intrs = qla24xx_enable_intrs,
|
|
|
|
.disable_intrs = qla24xx_disable_intrs,
|
|
|
|
.abort_command = qla24xx_abort_command,
|
|
|
|
.target_reset = qla24xx_abort_target,
|
|
|
|
.lun_reset = qla24xx_lun_reset,
|
|
|
|
.fabric_login = qla24xx_login_fabric,
|
|
|
|
.fabric_logout = qla24xx_fabric_logout,
|
|
|
|
.calc_req_entries = NULL,
|
|
|
|
.build_iocbs = NULL,
|
|
|
|
.prep_ms_iocb = qla24xx_prep_ms_iocb,
|
|
|
|
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
|
2009-03-24 16:08:14 +00:00
|
|
|
.read_nvram = NULL,
|
|
|
|
.write_nvram = NULL,
|
2009-01-05 19:18:11 +00:00
|
|
|
.fw_dump = qla81xx_fw_dump,
|
|
|
|
.beacon_on = qla24xx_beacon_on,
|
|
|
|
.beacon_off = qla24xx_beacon_off,
|
2012-02-09 19:15:34 +00:00
|
|
|
.beacon_blink = qla83xx_beacon_blink,
|
2009-01-05 19:18:11 +00:00
|
|
|
.read_optrom = qla25xx_read_optrom_data,
|
|
|
|
.write_optrom = qla24xx_write_optrom_data,
|
|
|
|
.get_flash_version = qla24xx_get_flash_version,
|
2010-05-28 22:08:27 +00:00
|
|
|
.start_scsi = qla24xx_dif_start_scsi,
|
2016-12-12 22:40:07 +00:00
|
|
|
.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
|
2010-04-13 00:59:55 +00:00
|
|
|
.abort_isp = qla2x00_abort_isp,
|
2013-08-27 05:37:28 +00:00
|
|
|
.iospace_config = qla2x00_iospace_config,
|
2013-03-28 12:21:23 +00:00
|
|
|
.initialize_adapter = qla2x00_initialize_adapter,
|
2010-04-13 00:59:55 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
static struct isp_operations qla82xx_isp_ops = {
|
|
|
|
.pci_config = qla82xx_pci_config,
|
|
|
|
.reset_chip = qla82xx_reset_chip,
|
|
|
|
.chip_diag = qla24xx_chip_diag,
|
|
|
|
.config_rings = qla82xx_config_rings,
|
|
|
|
.reset_adapter = qla24xx_reset_adapter,
|
|
|
|
.nvram_config = qla81xx_nvram_config,
|
|
|
|
.update_fw_options = qla24xx_update_fw_options,
|
|
|
|
.load_risc = qla82xx_load_risc,
|
2012-08-22 18:21:14 +00:00
|
|
|
.pci_info_str = qla24xx_pci_info_str,
|
2010-04-13 00:59:55 +00:00
|
|
|
.fw_version_str = qla24xx_fw_version_str,
|
|
|
|
.intr_handler = qla82xx_intr_handler,
|
|
|
|
.enable_intrs = qla82xx_enable_intrs,
|
|
|
|
.disable_intrs = qla82xx_disable_intrs,
|
|
|
|
.abort_command = qla24xx_abort_command,
|
|
|
|
.target_reset = qla24xx_abort_target,
|
|
|
|
.lun_reset = qla24xx_lun_reset,
|
|
|
|
.fabric_login = qla24xx_login_fabric,
|
|
|
|
.fabric_logout = qla24xx_fabric_logout,
|
|
|
|
.calc_req_entries = NULL,
|
|
|
|
.build_iocbs = NULL,
|
|
|
|
.prep_ms_iocb = qla24xx_prep_ms_iocb,
|
|
|
|
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
|
|
|
|
.read_nvram = qla24xx_read_nvram_data,
|
|
|
|
.write_nvram = qla24xx_write_nvram_data,
|
2014-02-26 09:15:12 +00:00
|
|
|
.fw_dump = qla82xx_fw_dump,
|
2011-08-16 18:31:45 +00:00
|
|
|
.beacon_on = qla82xx_beacon_on,
|
|
|
|
.beacon_off = qla82xx_beacon_off,
|
|
|
|
.beacon_blink = NULL,
|
2010-04-13 00:59:55 +00:00
|
|
|
.read_optrom = qla82xx_read_optrom_data,
|
|
|
|
.write_optrom = qla82xx_write_optrom_data,
|
2013-08-27 05:37:28 +00:00
|
|
|
.get_flash_version = qla82xx_get_flash_version,
|
2010-04-13 00:59:55 +00:00
|
|
|
.start_scsi = qla82xx_start_scsi,
|
2016-12-12 22:40:07 +00:00
|
|
|
.start_scsi_mq = NULL,
|
2010-04-13 00:59:55 +00:00
|
|
|
.abort_isp = qla82xx_abort_isp,
|
2011-11-18 17:03:16 +00:00
|
|
|
.iospace_config = qla82xx_iospace_config,
|
2013-03-28 12:21:23 +00:00
|
|
|
.initialize_adapter = qla2x00_initialize_adapter,
|
2009-01-05 19:18:11 +00:00
|
|
|
};
|
|
|
|
|
2013-08-27 05:37:28 +00:00
|
|
|
static struct isp_operations qla8044_isp_ops = {
|
|
|
|
.pci_config = qla82xx_pci_config,
|
|
|
|
.reset_chip = qla82xx_reset_chip,
|
|
|
|
.chip_diag = qla24xx_chip_diag,
|
|
|
|
.config_rings = qla82xx_config_rings,
|
|
|
|
.reset_adapter = qla24xx_reset_adapter,
|
|
|
|
.nvram_config = qla81xx_nvram_config,
|
|
|
|
.update_fw_options = qla24xx_update_fw_options,
|
|
|
|
.load_risc = qla82xx_load_risc,
|
|
|
|
.pci_info_str = qla24xx_pci_info_str,
|
|
|
|
.fw_version_str = qla24xx_fw_version_str,
|
|
|
|
.intr_handler = qla8044_intr_handler,
|
|
|
|
.enable_intrs = qla82xx_enable_intrs,
|
|
|
|
.disable_intrs = qla82xx_disable_intrs,
|
|
|
|
.abort_command = qla24xx_abort_command,
|
|
|
|
.target_reset = qla24xx_abort_target,
|
|
|
|
.lun_reset = qla24xx_lun_reset,
|
|
|
|
.fabric_login = qla24xx_login_fabric,
|
|
|
|
.fabric_logout = qla24xx_fabric_logout,
|
|
|
|
.calc_req_entries = NULL,
|
|
|
|
.build_iocbs = NULL,
|
|
|
|
.prep_ms_iocb = qla24xx_prep_ms_iocb,
|
|
|
|
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
|
|
|
|
.read_nvram = NULL,
|
|
|
|
.write_nvram = NULL,
|
2014-02-26 09:15:12 +00:00
|
|
|
.fw_dump = qla8044_fw_dump,
|
2013-08-27 05:37:28 +00:00
|
|
|
.beacon_on = qla82xx_beacon_on,
|
|
|
|
.beacon_off = qla82xx_beacon_off,
|
|
|
|
.beacon_blink = NULL,
|
2014-02-26 09:15:13 +00:00
|
|
|
.read_optrom = qla8044_read_optrom_data,
|
2013-08-27 05:37:28 +00:00
|
|
|
.write_optrom = qla8044_write_optrom_data,
|
|
|
|
.get_flash_version = qla82xx_get_flash_version,
|
|
|
|
.start_scsi = qla82xx_start_scsi,
|
2016-12-12 22:40:07 +00:00
|
|
|
.start_scsi_mq = NULL,
|
2013-08-27 05:37:28 +00:00
|
|
|
.abort_isp = qla8044_abort_isp,
|
|
|
|
.iospace_config = qla82xx_iospace_config,
|
|
|
|
.initialize_adapter = qla2x00_initialize_adapter,
|
|
|
|
};
|
|
|
|
|
2012-02-09 19:15:34 +00:00
|
|
|
static struct isp_operations qla83xx_isp_ops = {
|
|
|
|
.pci_config = qla25xx_pci_config,
|
|
|
|
.reset_chip = qla24xx_reset_chip,
|
|
|
|
.chip_diag = qla24xx_chip_diag,
|
|
|
|
.config_rings = qla24xx_config_rings,
|
|
|
|
.reset_adapter = qla24xx_reset_adapter,
|
|
|
|
.nvram_config = qla81xx_nvram_config,
|
2020-02-26 22:40:07 +00:00
|
|
|
.update_fw_options = qla24xx_update_fw_options,
|
2012-02-09 19:15:34 +00:00
|
|
|
.load_risc = qla81xx_load_risc,
|
|
|
|
.pci_info_str = qla24xx_pci_info_str,
|
|
|
|
.fw_version_str = qla24xx_fw_version_str,
|
|
|
|
.intr_handler = qla24xx_intr_handler,
|
|
|
|
.enable_intrs = qla24xx_enable_intrs,
|
|
|
|
.disable_intrs = qla24xx_disable_intrs,
|
|
|
|
.abort_command = qla24xx_abort_command,
|
|
|
|
.target_reset = qla24xx_abort_target,
|
|
|
|
.lun_reset = qla24xx_lun_reset,
|
|
|
|
.fabric_login = qla24xx_login_fabric,
|
|
|
|
.fabric_logout = qla24xx_fabric_logout,
|
|
|
|
.calc_req_entries = NULL,
|
|
|
|
.build_iocbs = NULL,
|
|
|
|
.prep_ms_iocb = qla24xx_prep_ms_iocb,
|
|
|
|
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
|
|
|
|
.read_nvram = NULL,
|
|
|
|
.write_nvram = NULL,
|
|
|
|
.fw_dump = qla83xx_fw_dump,
|
|
|
|
.beacon_on = qla24xx_beacon_on,
|
|
|
|
.beacon_off = qla24xx_beacon_off,
|
|
|
|
.beacon_blink = qla83xx_beacon_blink,
|
|
|
|
.read_optrom = qla25xx_read_optrom_data,
|
|
|
|
.write_optrom = qla24xx_write_optrom_data,
|
|
|
|
.get_flash_version = qla24xx_get_flash_version,
|
|
|
|
.start_scsi = qla24xx_dif_start_scsi,
|
2016-12-12 22:40:07 +00:00
|
|
|
.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
|
2012-02-09 19:15:34 +00:00
|
|
|
.abort_isp = qla2x00_abort_isp,
|
|
|
|
.iospace_config = qla83xx_iospace_config,
|
2013-03-28 12:21:23 +00:00
|
|
|
.initialize_adapter = qla2x00_initialize_adapter,
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct isp_operations qlafx00_isp_ops = {
|
|
|
|
.pci_config = qlafx00_pci_config,
|
|
|
|
.reset_chip = qlafx00_soft_reset,
|
|
|
|
.chip_diag = qlafx00_chip_diag,
|
|
|
|
.config_rings = qlafx00_config_rings,
|
|
|
|
.reset_adapter = qlafx00_soft_reset,
|
|
|
|
.nvram_config = NULL,
|
|
|
|
.update_fw_options = NULL,
|
|
|
|
.load_risc = NULL,
|
|
|
|
.pci_info_str = qlafx00_pci_info_str,
|
|
|
|
.fw_version_str = qlafx00_fw_version_str,
|
|
|
|
.intr_handler = qlafx00_intr_handler,
|
|
|
|
.enable_intrs = qlafx00_enable_intrs,
|
|
|
|
.disable_intrs = qlafx00_disable_intrs,
|
2014-02-26 09:15:18 +00:00
|
|
|
.abort_command = qla24xx_async_abort_command,
|
2013-03-28 12:21:23 +00:00
|
|
|
.target_reset = qlafx00_abort_target,
|
|
|
|
.lun_reset = qlafx00_lun_reset,
|
|
|
|
.fabric_login = NULL,
|
|
|
|
.fabric_logout = NULL,
|
|
|
|
.calc_req_entries = NULL,
|
|
|
|
.build_iocbs = NULL,
|
|
|
|
.prep_ms_iocb = qla24xx_prep_ms_iocb,
|
|
|
|
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
|
|
|
|
.read_nvram = qla24xx_read_nvram_data,
|
|
|
|
.write_nvram = qla24xx_write_nvram_data,
|
|
|
|
.fw_dump = NULL,
|
|
|
|
.beacon_on = qla24xx_beacon_on,
|
|
|
|
.beacon_off = qla24xx_beacon_off,
|
|
|
|
.beacon_blink = NULL,
|
|
|
|
.read_optrom = qla24xx_read_optrom_data,
|
|
|
|
.write_optrom = qla24xx_write_optrom_data,
|
|
|
|
.get_flash_version = qla24xx_get_flash_version,
|
|
|
|
.start_scsi = qlafx00_start_scsi,
|
2016-12-12 22:40:07 +00:00
|
|
|
.start_scsi_mq = NULL,
|
2013-03-28 12:21:23 +00:00
|
|
|
.abort_isp = qlafx00_abort_isp,
|
|
|
|
.iospace_config = qlafx00_iospace_config,
|
|
|
|
.initialize_adapter = qlafx00_initialize_adapter,
|
2012-02-09 19:15:34 +00:00
|
|
|
};
|
|
|
|
|
2014-02-26 09:15:06 +00:00
|
|
|
static struct isp_operations qla27xx_isp_ops = {
|
|
|
|
.pci_config = qla25xx_pci_config,
|
|
|
|
.reset_chip = qla24xx_reset_chip,
|
|
|
|
.chip_diag = qla24xx_chip_diag,
|
|
|
|
.config_rings = qla24xx_config_rings,
|
|
|
|
.reset_adapter = qla24xx_reset_adapter,
|
|
|
|
.nvram_config = qla81xx_nvram_config,
|
2019-07-26 16:07:37 +00:00
|
|
|
.update_fw_options = qla24xx_update_fw_options,
|
2014-02-26 09:15:06 +00:00
|
|
|
.load_risc = qla81xx_load_risc,
|
|
|
|
.pci_info_str = qla24xx_pci_info_str,
|
|
|
|
.fw_version_str = qla24xx_fw_version_str,
|
|
|
|
.intr_handler = qla24xx_intr_handler,
|
|
|
|
.enable_intrs = qla24xx_enable_intrs,
|
|
|
|
.disable_intrs = qla24xx_disable_intrs,
|
|
|
|
.abort_command = qla24xx_abort_command,
|
|
|
|
.target_reset = qla24xx_abort_target,
|
|
|
|
.lun_reset = qla24xx_lun_reset,
|
|
|
|
.fabric_login = qla24xx_login_fabric,
|
|
|
|
.fabric_logout = qla24xx_fabric_logout,
|
|
|
|
.calc_req_entries = NULL,
|
|
|
|
.build_iocbs = NULL,
|
|
|
|
.prep_ms_iocb = qla24xx_prep_ms_iocb,
|
|
|
|
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
|
|
|
|
.read_nvram = NULL,
|
|
|
|
.write_nvram = NULL,
|
|
|
|
.fw_dump = qla27xx_fwdump,
|
2020-03-31 10:40:13 +00:00
|
|
|
.mpi_fw_dump = qla27xx_mpi_fwdump,
|
2014-02-26 09:15:06 +00:00
|
|
|
.beacon_on = qla24xx_beacon_on,
|
|
|
|
.beacon_off = qla24xx_beacon_off,
|
|
|
|
.beacon_blink = qla83xx_beacon_blink,
|
|
|
|
.read_optrom = qla25xx_read_optrom_data,
|
|
|
|
.write_optrom = qla24xx_write_optrom_data,
|
|
|
|
.get_flash_version = qla24xx_get_flash_version,
|
|
|
|
.start_scsi = qla24xx_dif_start_scsi,
|
2016-12-12 22:40:07 +00:00
|
|
|
.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
|
2014-02-26 09:15:06 +00:00
|
|
|
.abort_isp = qla2x00_abort_isp,
|
|
|
|
.iospace_config = qla83xx_iospace_config,
|
|
|
|
.initialize_adapter = qla2x00_initialize_adapter,
|
|
|
|
};
|
|
|
|
|
2006-03-09 22:27:08 +00:00
|
|
|
static inline void
|
2008-11-06 18:40:51 +00:00
|
|
|
qla2x00_set_isp_flags(struct qla_hw_data *ha)
|
2006-03-09 22:27:08 +00:00
|
|
|
{
|
|
|
|
ha->device_type = DT_EXTENDED_IDS;
|
|
|
|
switch (ha->pdev->device) {
|
|
|
|
case PCI_DEVICE_ID_QLOGIC_ISP2100:
|
2016-07-06 15:14:31 +00:00
|
|
|
ha->isp_type |= DT_ISP2100;
|
2006-03-09 22:27:08 +00:00
|
|
|
ha->device_type &= ~DT_EXTENDED_IDS;
|
2006-05-17 22:09:34 +00:00
|
|
|
ha->fw_srisc_address = RISC_START_ADDRESS_2100;
|
2006-03-09 22:27:08 +00:00
|
|
|
break;
|
|
|
|
case PCI_DEVICE_ID_QLOGIC_ISP2200:
|
2016-07-06 15:14:31 +00:00
|
|
|
ha->isp_type |= DT_ISP2200;
|
2006-03-09 22:27:08 +00:00
|
|
|
ha->device_type &= ~DT_EXTENDED_IDS;
|
2006-05-17 22:09:34 +00:00
|
|
|
ha->fw_srisc_address = RISC_START_ADDRESS_2100;
|
2006-03-09 22:27:08 +00:00
|
|
|
break;
|
|
|
|
case PCI_DEVICE_ID_QLOGIC_ISP2300:
|
2016-07-06 15:14:31 +00:00
|
|
|
ha->isp_type |= DT_ISP2300;
|
2006-03-09 22:27:39 +00:00
|
|
|
ha->device_type |= DT_ZIO_SUPPORTED;
|
2006-05-17 22:09:34 +00:00
|
|
|
ha->fw_srisc_address = RISC_START_ADDRESS_2300;
|
2006-03-09 22:27:08 +00:00
|
|
|
break;
|
|
|
|
case PCI_DEVICE_ID_QLOGIC_ISP2312:
|
2016-07-06 15:14:31 +00:00
|
|
|
ha->isp_type |= DT_ISP2312;
|
2006-03-09 22:27:39 +00:00
|
|
|
ha->device_type |= DT_ZIO_SUPPORTED;
|
2006-05-17 22:09:34 +00:00
|
|
|
ha->fw_srisc_address = RISC_START_ADDRESS_2300;
|
2006-03-09 22:27:08 +00:00
|
|
|
break;
|
|
|
|
case PCI_DEVICE_ID_QLOGIC_ISP2322:
|
2016-07-06 15:14:31 +00:00
|
|
|
ha->isp_type |= DT_ISP2322;
|
2006-03-09 22:27:39 +00:00
|
|
|
ha->device_type |= DT_ZIO_SUPPORTED;
|
2006-03-09 22:27:08 +00:00
|
|
|
if (ha->pdev->subsystem_vendor == 0x1028 &&
|
|
|
|
ha->pdev->subsystem_device == 0x0170)
|
|
|
|
ha->device_type |= DT_OEM_001;
|
2006-05-17 22:09:34 +00:00
|
|
|
ha->fw_srisc_address = RISC_START_ADDRESS_2300;
|
2006-03-09 22:27:08 +00:00
|
|
|
break;
|
|
|
|
case PCI_DEVICE_ID_QLOGIC_ISP6312:
|
2016-07-06 15:14:31 +00:00
|
|
|
ha->isp_type |= DT_ISP6312;
|
2006-05-17 22:09:34 +00:00
|
|
|
ha->fw_srisc_address = RISC_START_ADDRESS_2300;
|
2006-03-09 22:27:08 +00:00
|
|
|
break;
|
|
|
|
case PCI_DEVICE_ID_QLOGIC_ISP6322:
|
2016-07-06 15:14:31 +00:00
|
|
|
ha->isp_type |= DT_ISP6322;
|
2006-05-17 22:09:34 +00:00
|
|
|
ha->fw_srisc_address = RISC_START_ADDRESS_2300;
|
2006-03-09 22:27:08 +00:00
|
|
|
break;
|
|
|
|
case PCI_DEVICE_ID_QLOGIC_ISP2422:
|
2016-07-06 15:14:31 +00:00
|
|
|
ha->isp_type |= DT_ISP2422;
|
2006-03-09 22:27:39 +00:00
|
|
|
ha->device_type |= DT_ZIO_SUPPORTED;
|
2007-07-19 22:05:56 +00:00
|
|
|
ha->device_type |= DT_FWI2;
|
2007-07-19 22:05:57 +00:00
|
|
|
ha->device_type |= DT_IIDMA;
|
2006-05-17 22:09:34 +00:00
|
|
|
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
|
2006-03-09 22:27:08 +00:00
|
|
|
break;
|
|
|
|
case PCI_DEVICE_ID_QLOGIC_ISP2432:
|
2016-07-06 15:14:31 +00:00
|
|
|
ha->isp_type |= DT_ISP2432;
|
2006-03-09 22:27:39 +00:00
|
|
|
ha->device_type |= DT_ZIO_SUPPORTED;
|
2007-07-19 22:05:56 +00:00
|
|
|
ha->device_type |= DT_FWI2;
|
2007-07-19 22:05:57 +00:00
|
|
|
ha->device_type |= DT_IIDMA;
|
2006-05-17 22:09:34 +00:00
|
|
|
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
|
2006-03-09 22:27:08 +00:00
|
|
|
break;
|
2008-04-03 20:13:26 +00:00
|
|
|
case PCI_DEVICE_ID_QLOGIC_ISP8432:
|
2016-07-06 15:14:31 +00:00
|
|
|
ha->isp_type |= DT_ISP8432;
|
2008-04-03 20:13:26 +00:00
|
|
|
ha->device_type |= DT_ZIO_SUPPORTED;
|
|
|
|
ha->device_type |= DT_FWI2;
|
|
|
|
ha->device_type |= DT_IIDMA;
|
|
|
|
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
|
|
|
|
break;
|
2006-03-09 22:27:13 +00:00
|
|
|
case PCI_DEVICE_ID_QLOGIC_ISP5422:
|
2016-07-06 15:14:31 +00:00
|
|
|
ha->isp_type |= DT_ISP5422;
|
2007-07-19 22:05:56 +00:00
|
|
|
ha->device_type |= DT_FWI2;
|
2006-05-17 22:09:34 +00:00
|
|
|
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
|
2006-03-09 22:27:08 +00:00
|
|
|
break;
|
2006-03-09 22:27:13 +00:00
|
|
|
case PCI_DEVICE_ID_QLOGIC_ISP5432:
|
2016-07-06 15:14:31 +00:00
|
|
|
ha->isp_type |= DT_ISP5432;
|
2007-07-19 22:05:56 +00:00
|
|
|
ha->device_type |= DT_FWI2;
|
2006-05-17 22:09:34 +00:00
|
|
|
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
|
2006-03-09 22:27:08 +00:00
|
|
|
break;
|
2007-07-20 03:37:34 +00:00
|
|
|
case PCI_DEVICE_ID_QLOGIC_ISP2532:
|
2016-07-06 15:14:31 +00:00
|
|
|
ha->isp_type |= DT_ISP2532;
|
2007-07-20 03:37:34 +00:00
|
|
|
ha->device_type |= DT_ZIO_SUPPORTED;
|
|
|
|
ha->device_type |= DT_FWI2;
|
|
|
|
ha->device_type |= DT_IIDMA;
|
2006-05-17 22:09:34 +00:00
|
|
|
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
|
2006-03-09 22:27:08 +00:00
|
|
|
break;
|
2009-01-05 19:18:11 +00:00
|
|
|
case PCI_DEVICE_ID_QLOGIC_ISP8001:
|
2016-07-06 15:14:31 +00:00
|
|
|
ha->isp_type |= DT_ISP8001;
|
2009-01-05 19:18:11 +00:00
|
|
|
ha->device_type |= DT_ZIO_SUPPORTED;
|
|
|
|
ha->device_type |= DT_FWI2;
|
|
|
|
ha->device_type |= DT_IIDMA;
|
|
|
|
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
|
|
|
|
break;
|
2010-04-13 00:59:55 +00:00
|
|
|
case PCI_DEVICE_ID_QLOGIC_ISP8021:
|
2016-07-06 15:14:31 +00:00
|
|
|
ha->isp_type |= DT_ISP8021;
|
2010-04-13 00:59:55 +00:00
|
|
|
ha->device_type |= DT_ZIO_SUPPORTED;
|
|
|
|
ha->device_type |= DT_FWI2;
|
|
|
|
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
|
|
|
|
/* Initialize 82XX ISP flags */
|
|
|
|
qla82xx_init_flags(ha);
|
|
|
|
break;
|
2013-08-27 05:37:28 +00:00
|
|
|
case PCI_DEVICE_ID_QLOGIC_ISP8044:
|
2016-07-06 15:14:31 +00:00
|
|
|
ha->isp_type |= DT_ISP8044;
|
2013-08-27 05:37:28 +00:00
|
|
|
ha->device_type |= DT_ZIO_SUPPORTED;
|
|
|
|
ha->device_type |= DT_FWI2;
|
|
|
|
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
|
|
|
|
/* Initialize 82XX ISP flags */
|
|
|
|
qla82xx_init_flags(ha);
|
|
|
|
break;
|
2012-02-09 19:15:34 +00:00
|
|
|
case PCI_DEVICE_ID_QLOGIC_ISP2031:
|
2016-07-06 15:14:31 +00:00
|
|
|
ha->isp_type |= DT_ISP2031;
|
2012-02-09 19:15:34 +00:00
|
|
|
ha->device_type |= DT_ZIO_SUPPORTED;
|
|
|
|
ha->device_type |= DT_FWI2;
|
|
|
|
ha->device_type |= DT_IIDMA;
|
|
|
|
ha->device_type |= DT_T10_PI;
|
|
|
|
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP8031:
		ha->isp_type |= DT_ISP8031;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISPF001:
		ha->isp_type |= DT_ISPFX00;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2071:
		ha->isp_type |= DT_ISP2071;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2271:
		ha->isp_type |= DT_ISP2271;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2261:
		ha->isp_type |= DT_ISP2261;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2081:
	case PCI_DEVICE_ID_QLOGIC_ISP2089:
		ha->isp_type |= DT_ISP2081;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2281:
	case PCI_DEVICE_ID_QLOGIC_ISP2289:
		ha->isp_type |= DT_ISP2281;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	}

	if (IS_QLA82XX(ha))
		ha->port_no = ha->portnum & 1;
	else {
		/* Get adapter physical port no from interrupt pin register. */
		pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
		if (IS_QLA25XX(ha) || IS_QLA2031(ha) ||
		    IS_QLA27XX(ha) || IS_QLA28XX(ha))
			ha->port_no--;
		else
			ha->port_no = !(ha->port_no & 1);
	}

	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
	    "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
	    ha->device_type, ha->port_no, ha->fw_srisc_address);
}

static void
qla2xxx_scan_start(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	if (vha->hw->flags.running_gold_fw)
		return;

	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	set_bit(RSCN_UPDATE, &vha->dpc_flags);
	set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
}

static int
qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	scsi_qla_host_t *vha = shost_priv(shost);

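	/*
	 * Bail out early while the driver is unloading: if the board broke
	 * during init, the scan must not keep dereferencing hw data that
	 * the removal path may already be tearing down.
	 */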
	if (test_bit(UNLOADING, &vha->dpc_flags))
		return 1;
	if (!vha->host)
		return 1;
	if (time > vha->hw->loop_reset_delay * HZ)
		return 1;

	return atomic_read(&vha->loop_state) == LOOP_READY;
}
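
/* Run deferred IOCB work in process context, at most two passes per invocation. */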
static void qla2x00_iocb_work_fn(struct work_struct *work)
{
	struct scsi_qla_host *vha = container_of(work,
	    struct scsi_qla_host, iocb_work);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	int i = 2;
	unsigned long flags;

	if (test_bit(UNLOADING, &base_vha->dpc_flags))
		return;

	while (!list_empty(&vha->work_list) && i > 0) {
		qla2x00_do_work(vha);
		i--;
	}

	spin_lock_irqsave(&vha->work_lock, flags);
	clear_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags);
	spin_unlock_irqrestore(&vha->work_lock, flags);
}

/*
 * PCI driver interface
 */
static int
qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int	ret = -ENODEV;
	struct Scsi_Host *host;
	scsi_qla_host_t *base_vha = NULL;
	struct qla_hw_data *ha;
	char pci_info[30];
	char fw_str[30], wq_name[30];
	struct scsi_host_template *sht;
	int bars, mem_only = 0;
	uint16_t req_length = 0, rsp_length = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	int i;

	bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
	sht = &qla2xxx_driver_template;
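	/* These ISPs are memory-mapped only, so request just the memory BARs. */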
	if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2081 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2281 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2089 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		mem_only = 1;
		ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
		    "Mem only adapter.\n");
	}
	ql_dbg_pci(ql_dbg_init, pdev, 0x0008,
	    "Bars=%d.\n", bars);

	if (mem_only) {
		if (pci_enable_device_mem(pdev))
			return ret;
	} else {
		if (pci_enable_device(pdev))
			return ret;
	}
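
	/* A kdump kernel runs with minimal memory: disable multiqueue and skip the firmware dump allocation. */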
	if (is_kdump_kernel()) {
		ql2xmqsupport = 0;
		ql2xallocfwdump = 0;
	}

	/* This may fail but that's ok */
	pci_enable_pcie_error_reporting(pdev);

	ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
	if (!ha) {
		ql_log_pci(ql_log_fatal, pdev, 0x0009,
		    "Unable to allocate memory for ha.\n");
		goto disable_device;
	}
	ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
	    "Memory allocated for ha=%p.\n", ha);
	ha->pdev = pdev;
	INIT_LIST_HEAD(&ha->tgt.q_full_list);
	spin_lock_init(&ha->tgt.q_full_lock);
	spin_lock_init(&ha->tgt.sess_lock);
	spin_lock_init(&ha->tgt.atio_lock);

	spin_lock_init(&ha->sadb_lock);
	INIT_LIST_HEAD(&ha->sadb_tx_index_list);
	INIT_LIST_HEAD(&ha->sadb_rx_index_list);

	spin_lock_init(&ha->sadb_fp_lock);

	if (qla_edif_sadb_build_free_pool(ha)) {
		kfree(ha);
		goto disable_device;
	}

	atomic_set(&ha->nvme_active_aen_cnt, 0);

	/* Clear our data area */
	ha->bars = bars;
	ha->mem_only = mem_only;
	spin_lock_init(&ha->hardware_lock);
	spin_lock_init(&ha->vport_slock);
	mutex_init(&ha->selflogin_lock);
	mutex_init(&ha->optrom_mutex);

	/* Set ISP-type information. */
	qla2x00_set_isp_flags(ha);

	/* Set EEH reset type to fundamental if required by hba */
	if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
	    IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
		pdev->needs_freset = 1;

	ha->prev_topology = 0;
	ha->init_cb_size = sizeof(init_cb_t);
	ha->link_data_rate = PORT_SPEED_UNKNOWN;
	ha->optrom_size = OPTROM_SIZE_2300;
	ha->max_exchg = FW_MAX_EXCHANGES_CNT;
	atomic_set(&ha->num_pend_mbx_stage1, 0);
	atomic_set(&ha->num_pend_mbx_stage2, 0);
	atomic_set(&ha->num_pend_mbx_stage3, 0);
	atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD);
	ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD;

	/* Assign ISP specific operations. */
	if (IS_QLA2100(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
		ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
		req_length = REQUEST_ENTRY_CNT_2100;
		rsp_length = RESPONSE_ENTRY_CNT_2100;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
		ha->gid_list_info_size = 4;
		ha->flash_conf_off = ~0;
		ha->flash_data_off = ~0;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
		ha->isp_ops = &qla2100_isp_ops;
	} else if (IS_QLA2200(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
		ha->mbx_count = MAILBOX_REGISTER_COUNT_2200;
		req_length = REQUEST_ENTRY_CNT_2200;
		rsp_length = RESPONSE_ENTRY_CNT_2100;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
		ha->gid_list_info_size = 4;
		ha->flash_conf_off = ~0;
		ha->flash_data_off = ~0;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
		ha->isp_ops = &qla2100_isp_ops;
	} else if (IS_QLA23XX(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_2200;
		rsp_length = RESPONSE_ENTRY_CNT_2300;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->gid_list_info_size = 6;
		if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->optrom_size = OPTROM_SIZE_2322;
		ha->flash_conf_off = ~0;
		ha->flash_data_off = ~0;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
		ha->isp_ops = &qla2300_isp_ops;
	} else if (IS_QLA24XX_TYPE(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_24XX;
		rsp_length = RESPONSE_ENTRY_CNT_2300;
		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_24XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
		ha->isp_ops = &qla24xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
	} else if (IS_QLA25XX(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_24XX;
		rsp_length = RESPONSE_ENTRY_CNT_2300;
		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_25XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla25xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
	} else if (IS_QLA81XX(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_24XX;
		rsp_length = RESPONSE_ENTRY_CNT_2300;
		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_81XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla81xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
	} else if (IS_QLA82XX(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_82XX;
		rsp_length = RESPONSE_ENTRY_CNT_82XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_82XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla82xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
	} else if (IS_QLA8044(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_82XX;
		rsp_length = RESPONSE_ENTRY_CNT_82XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_83XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla8044_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
	} else if (IS_QLA83XX(ha)) {
		ha->portnum = PCI_FUNC(ha->pdev->devfn);
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_83XX;
		rsp_length = RESPONSE_ENTRY_CNT_83XX;
		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_83XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla83xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
	} else if (IS_QLAFX00(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00;
		ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00;
		ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00;
		req_length = REQUEST_ENTRY_CNT_FX00;
		rsp_length = RESPONSE_ENTRY_CNT_FX00;
		ha->isp_ops = &qlafx00_isp_ops;
		ha->port_down_retry_count = 30; /* default value */
		ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
		ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
		ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL;
		ha->mr.fw_hbt_en = 1;
		ha->mr.host_info_resend = false;
		ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL;
	} else if (IS_QLA27XX(ha)) {
		ha->portnum = PCI_FUNC(ha->pdev->devfn);
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_83XX;
		rsp_length = RESPONSE_ENTRY_CNT_83XX;
		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_83XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla27xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
	} else if (IS_QLA28XX(ha)) {
		ha->portnum = PCI_FUNC(ha->pdev->devfn);
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_83XX;
		rsp_length = RESPONSE_ENTRY_CNT_83XX;
		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_28XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla27xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_28XX;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_28XX;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
	}

	ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
	    "mbx_count=%d, req_length=%d, "
	    "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
	    "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, "
	    "max_fibre_devices=%d.\n",
	    ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
	    ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
	    ha->nvram_npiv_size, ha->max_fibre_devices);
	ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
	    "isp_ops=%p, flash_conf_off=%d, "
	    "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
	    ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
	    ha->nvram_conf_off, ha->nvram_data_off);

	/* Configure PCI I/O space */
	ret = ha->isp_ops->iospace_config(ha);
	if (ret)
		goto iospace_config_failed;

	ql_log_pci(ql_log_info, pdev, 0x001d,
	    "Found an ISP%04X irq %d iobase 0x%p.\n",
	    pdev->device, pdev->irq, ha->iobase);
	mutex_init(&ha->vport_lock);
	mutex_init(&ha->mq_lock);
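	/*
	 * The mailbox command completion starts out signalled so the very
	 * first mailbox command does not wait for a previous one.
	 */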
	init_completion(&ha->mbx_cmd_comp);
	complete(&ha->mbx_cmd_comp);
	init_completion(&ha->mbx_intr_comp);
	init_completion(&ha->dcbx_comp);
	init_completion(&ha->lb_portup_comp);

	set_bit(0, (unsigned long *) ha->vp_idx_map);

	qla2x00_config_dma_addressing(ha);
	ql_dbg_pci(ql_dbg_init, pdev, 0x0020,
	    "64 Bit addressing is %s.\n",
	    ha->flags.enable_64bit_addressing ? "enable" :
	    "disable");
	ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
	if (ret) {
		ql_log_pci(ql_log_fatal, pdev, 0x0031,
		    "Failed to allocate memory for adapter, aborting.\n");

		goto probe_hw_failed;
	}

	req->max_q_depth = MAX_Q_DEPTH;
	if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
		req->max_q_depth = ql2xmaxqdepth;

	base_vha = qla2x00_create_host(sht, ha);
	if (!base_vha) {
		ret = -ENOMEM;
		goto probe_hw_failed;
	}

	pci_set_drvdata(pdev, base_vha);
	set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);

	host = base_vha->host;
	base_vha->req = req;
	if (IS_QLA2XXX_MIDTYPE(ha))
		base_vha->mgmt_svr_loop_id =
			qla2x00_reserve_mgmt_server_loop_id(base_vha);
	else
		base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
			base_vha->vp_idx;

	/* Setup fcport template structure. */
	ha->mr.fcport.vha = base_vha;
	ha->mr.fcport.port_type = FCT_UNKNOWN;
	ha->mr.fcport.loop_id = FC_NO_LOOP_ID;
	qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED);
	ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED;
	ha->mr.fcport.scan_state = 1;

	qla2xxx_reset_stats(host, QLA2XX_HW_ERROR | QLA2XX_SHT_LNK_DWN |
			    QLA2XX_INT_ERR | QLA2XX_CMD_TIMEOUT |
			    QLA2XX_RESET_CMD_ERR | QLA2XX_TGT_SHT_LNK_DOWN);

	/* Set the SG table size based on ISP type */
	if (!IS_FWI2_CAPABLE(ha)) {
		if (IS_QLA2100(ha))
			host->sg_tablesize = 32;
	} else {
		if (!IS_QLA82XX(ha))
			host->sg_tablesize = QLA_SG_ALL;
	}
	host->max_id = ha->max_fibre_devices;
	host->cmd_per_lun = 3;
	host->unique_id = host->host_no;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	/* Older HBAs support only 16-bit LUNs */
	if (!IS_QLAFX00(ha) && !IS_FWI2_CAPABLE(ha) &&
	    ql2xmaxlun > 0xffff)
		host->max_lun = 0xffff;
	else
		host->max_lun = ql2xmaxlun;
	host->transportt = qla2xxx_transport_template;
	sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);

	ql_dbg(ql_dbg_init, base_vha, 0x0033,
	    "max_id=%d this_id=%d "
	    "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d "
	    "max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id,
	    host->this_id, host->cmd_per_lun, host->unique_id,
	    host->max_cmd_len, host->max_channel, host->max_lun,
	    host->transportt, sht->vendor_id);

	INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);

	/* Set up the irqs */
	ret = qla2x00_request_irqs(ha, rsp);
	if (ret)
		goto probe_failed;

	/* Alloc arrays of request and response ring ptrs */
	ret = qla2x00_alloc_queues(ha, req, rsp);
	if (ret) {
		ql_log(ql_log_fatal, base_vha, 0x003d,
		    "Failed to allocate memory for queue pointers..."
		    "aborting.\n");
		ret = -ENODEV;
		goto probe_failed;
	}

	if (ha->mqenable) {
		/* number of hardware queues supported by blk/scsi-mq*/
		host->nr_hw_queues = ha->max_qpairs;

		ql_dbg(ql_dbg_init, base_vha, 0x0192,
			"blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues);
	} else {
		if (ql2xnvmeenable) {
			host->nr_hw_queues = ha->max_qpairs;
			ql_dbg(ql_dbg_init, base_vha, 0x0194,
			    "FC-NVMe support is enabled, HW queues=%d\n",
			    host->nr_hw_queues);
		} else {
			ql_dbg(ql_dbg_init, base_vha, 0x0193,
			    "blk/scsi-mq disabled.\n");
		}
	}

	qlt_probe_one_stage1(base_vha, ha);

	pci_save_state(pdev);

	/* Assign back pointers */
	rsp->req = req;
	req->rsp = rsp;

	if (IS_QLAFX00(ha)) {
		ha->rsp_q_map[0] = rsp;
		ha->req_q_map[0] = req;
		set_bit(0, ha->req_qid_map);
		set_bit(0, ha->rsp_qid_map);
	}

	/* FWI2-capable only. */
	req->req_q_in = &ha->iobase->isp24.req_q_in;
	req->req_q_out = &ha->iobase->isp24.req_q_out;
	rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
	rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
		req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
		rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
		rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out;
	}

	if (IS_QLAFX00(ha)) {
		req->req_q_in = &ha->iobase->ispfx00.req_q_in;
		req->req_q_out = &ha->iobase->ispfx00.req_q_out;
		rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in;
		rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out;
	}

	if (IS_P3P_TYPE(ha)) {
		req->req_q_out = &ha->iobase->isp82.req_q_out[0];
		rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
		rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
	}

	ql_dbg(ql_dbg_multiq, base_vha, 0xc009,
	    "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
	    ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00a,
	    "req->req_q_in=%p req->req_q_out=%p "
	    "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
	    req->req_q_in, req->req_q_out,
	    rsp->rsp_q_in, rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x003e,
	    "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
	    ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
	ql_dbg(ql_dbg_init, base_vha, 0x003f,
	    "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
	    req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);

	ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
	if (unlikely(!ha->wq)) {
		ret = -ENOMEM;
		goto probe_failed;
	}

	if (ha->isp_ops->initialize_adapter(base_vha)) {
		ql_log(ql_log_fatal, base_vha, 0x00d6,
		    "Failed to initialize adapter - Adapter flags %x.\n",
		    base_vha->device_flags);

		if (IS_QLA82XX(ha)) {
			qla82xx_idc_lock(ha);
			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
				QLA8XXX_DEV_FAILED);
			qla82xx_idc_unlock(ha);
			ql_log(ql_log_fatal, base_vha, 0x00d7,
			    "HW State: FAILED.\n");
		} else if (IS_QLA8044(ha)) {
			qla8044_idc_lock(ha);
			qla8044_wr_direct(base_vha,
				QLA8044_CRB_DEV_STATE_INDEX,
				QLA8XXX_DEV_FAILED);
			qla8044_idc_unlock(ha);
			ql_log(ql_log_fatal, base_vha, 0x0150,
			    "HW State: FAILED.\n");
		}

		ret = -ENODEV;
		goto probe_failed;
	}
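
	/* Derive can_queue from the ring's outstanding command count, keeping a few slots spare for internal commands. */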
	if (IS_QLAFX00(ha))
		host->can_queue = QLAFX00_MAX_CANQUEUE;
	else
		host->can_queue = req->num_outstanding_cmds - 10;

	ql_dbg(ql_dbg_init, base_vha, 0x0032,
	    "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
	    host->can_queue, base_vha->req,
	    base_vha->mgmt_svr_loop_id, host->sg_tablesize);

	if (ha->mqenable) {
		bool startit = false;

		if (QLA_TGT_MODE_ENABLED())
			startit = false;

		if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED)
			startit = true;

		/* Create start of day qpairs for Block MQ */
		for (i = 0; i < ha->max_qpairs; i++)
			qla2xxx_create_qpair(base_vha, 5, 0, startit);
	}
	qla_init_iocb_limit(base_vha);

	if (ha->flags.running_gold_fw)
		goto skip_dpc;

	/*
	 * Startup the kernel thread for this host adapter
	 */
	ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
	    "%s_dpc", base_vha->host_str);
	if (IS_ERR(ha->dpc_thread)) {
		ql_log(ql_log_fatal, base_vha, 0x00ed,
		    "Failed to start DPC thread.\n");
		ret = PTR_ERR(ha->dpc_thread);
		ha->dpc_thread = NULL;
		goto probe_failed;
	}
	ql_dbg(ql_dbg_init, base_vha, 0x00ee,
	    "DPC thread started successfully.\n");

	/*
	 * If we're not coming up in initiator mode, we might sit for
	 * a while without waking up the dpc thread, which leads to a
	 * stuck process warning.  So just kick the dpc once here and
	 * let the kthread start (and go back to sleep in qla2x00_do_dpc).
	 */
	qla2xxx_wake_dpc(base_vha);

	INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);

	if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
		sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
		ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
		INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);

		sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no);
		ha->dpc_hp_wq = create_singlethread_workqueue(wq_name);
		INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work);
		INIT_WORK(&ha->idc_state_handler,
		    qla83xx_idc_state_handler_work);
		INIT_WORK(&ha->nic_core_unrecoverable,
		    qla83xx_nic_core_unrecoverable_work);
	}

skip_dpc:
	list_add_tail(&base_vha->list, &ha->vp_list);
	base_vha->host->irq = ha->pdev->irq;

	/* Initialized the timer */
	qla2x00_start_timer(base_vha, WATCH_INTERVAL);
	ql_dbg(ql_dbg_init, base_vha, 0x00ef,
	    "Started qla2x00_timer with "
	    "interval=%d.\n", WATCH_INTERVAL);
	ql_dbg(ql_dbg_init, base_vha, 0x00f0,
	    "Detected hba at address=%p.\n",
	    ha);
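
	/* Advertise T10-PI (DIF/DIX) protection and guard types when the firmware supports them. */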
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
		if (ha->fw_attributes & BIT_4) {
			int prot = 0, guard;

			base_vha->flags.difdix_supported = 1;
			ql_dbg(ql_dbg_init, base_vha, 0x00f1,
			    "Registering for DIF/DIX type 1 and 3 protection.\n");
			if (ql2xenabledif == 1)
				prot = SHOST_DIX_TYPE0_PROTECTION;
			if (ql2xprotmask)
				scsi_host_set_prot(host, ql2xprotmask);
			else
				scsi_host_set_prot(host,
				    prot | SHOST_DIF_TYPE1_PROTECTION
				    | SHOST_DIF_TYPE2_PROTECTION
				    | SHOST_DIF_TYPE3_PROTECTION
				    | SHOST_DIX_TYPE1_PROTECTION
				    | SHOST_DIX_TYPE2_PROTECTION
				    | SHOST_DIX_TYPE3_PROTECTION);

			guard = SHOST_DIX_GUARD_CRC;

			if (IS_PI_IPGUARD_CAPABLE(ha) &&
			    (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
				guard |= SHOST_DIX_GUARD_IP;

			if (ql2xprotguard)
				scsi_host_set_guard(host, ql2xprotguard);
			else
				scsi_host_set_guard(host, guard);
		} else
			base_vha->flags.difdix_supported = 0;
	}

	ha->isp_ops->enable_intrs(ha);

	if (IS_QLAFX00(ha)) {
		ret = qlafx00_fx_disc(base_vha,
		    &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
		host->sg_tablesize = (ha->mr.extended_io_enabled) ?
		    QLA_SG_ALL : 128;
	}

	ret = scsi_add_host(host, &pdev->dev);
	if (ret)
		goto probe_failed;

	base_vha->flags.init_done = 1;
	base_vha->flags.online = 1;
	ha->prev_minidump_failed = 0;

	ql_dbg(ql_dbg_init, base_vha, 0x00f2,
	    "Init done and hba is online.\n");

	if (qla_ini_mode_enabled(base_vha) ||
	    qla_dual_mode_enabled(base_vha))
		scsi_scan_host(host);
	else
		ql_dbg(ql_dbg_init, base_vha, 0x0122,
		    "skipping scsi_scan_host() for non-initiator port\n");

	qla2x00_alloc_sysfs_attr(base_vha);

	if (IS_QLAFX00(ha)) {
		ret = qlafx00_fx_disc(base_vha,
		    &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO);

		/* Register system information */
		ret = qlafx00_fx_disc(base_vha,
		    &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO);
	}

	qla2x00_init_host_attr(base_vha);

	qla2x00_dfs_setup(base_vha);

	ql_log(ql_log_info, base_vha, 0x00fb,
	    "QLogic %s - %s.\n", ha->model_number, ha->model_desc);
	ql_log(ql_log_info, base_vha, 0x00fc,
	    "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
	    pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info,
	    sizeof(pci_info)),
	    pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
	    base_vha->host_no,
	    ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str)));

	qlt_add_target(ha, base_vha);

	clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);

	if (test_bit(UNLOADING, &base_vha->dpc_flags))
		return -ENODEV;

	return 0;
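
	/*
	 * The failure labels below unwind progressively earlier stages of
	 * the probe: probe_failed, then probe_hw_failed, then
	 * iospace_config_failed, then disable_device.
	 */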
probe_failed:
	qla_enode_stop(base_vha);
	qla_edb_stop(base_vha);
	if (base_vha->gnl.l) {
		dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
		    base_vha->gnl.l, base_vha->gnl.ldma);
		base_vha->gnl.l = NULL;
	}

	if (base_vha->timer_active)
		qla2x00_stop_timer(base_vha);
	base_vha->flags.online = 0;
	if (ha->dpc_thread) {
		struct task_struct *t = ha->dpc_thread;

		ha->dpc_thread = NULL;
		kthread_stop(t);
	}

	qla2x00_free_device(base_vha);
	scsi_host_put(base_vha->host);
	/*
	 * Need to NULL out local req/rsp after
	 * qla2x00_free_device => qla2x00_free_queues frees
	 * what these are pointing to. Or else we'll
	 * fall over below in qla2x00_free_req/rsp_que.
	 */
	req = NULL;
	rsp = NULL;

probe_hw_failed:
	qla2x00_mem_free(ha);
	qla2x00_free_req_que(ha, req);
	qla2x00_free_rsp_que(ha, rsp);
	qla2x00_clear_drv_active(ha);

iospace_config_failed:
	if (IS_P3P_TYPE(ha)) {
		if (!ha->nx_pcibase)
			iounmap((device_reg_t *)ha->nx_pcibase);
		if (!ql2xdbwr)
			iounmap((device_reg_t *)ha->nxdb_wr_ptr);
	} else {
		if (ha->iobase)
			iounmap(ha->iobase);
		if (ha->cregbase)
			iounmap(ha->cregbase);
	}
	pci_release_selected_regions(ha->pdev, ha->bars);
	kfree(ha);

disable_device:
	pci_disable_device(pdev);
	return ret;
}

static void __qla_set_remove_flag(scsi_qla_host_t *base_vha)
{
	scsi_qla_host_t *vp;
	unsigned long flags;
	struct qla_hw_data *ha;

	if (!base_vha)
		return;

	ha = base_vha->hw;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list)
		set_bit(PFLG_DRIVER_REMOVING, &vp->pci_flags);

	/*
	 * Indicate device removal to prevent future board_disable
	 * and wait until any pending board_disable has completed.
	 */
	set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

static void
qla2x00_shutdown(struct pci_dev *pdev)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;

	vha = pci_get_drvdata(pdev);
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0xfffa,
	    "Adapter shutdown\n");

	/*
	 * Prevent future board_disable and wait
	 * until any pending board_disable has completed.
	 */
	__qla_set_remove_flag(vha);
	cancel_work_sync(&ha->board_disable);

	if (!atomic_read(&pdev->enable_cnt))
		return;

	/* Notify ISPFX00 firmware */
	if (IS_QLAFX00(ha))
		qlafx00_driver_shutdown(vha, 20);

	/* Turn-off FCE trace */
	if (ha->flags.fce_enabled) {
		qla2x00_disable_fce_trace(vha, NULL, NULL);
		ha->flags.fce_enabled = 0;
	}

	/* Turn-off EFT trace */
	if (ha->eft)
		qla2x00_disable_eft_trace(vha);

	if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		if (ha->flags.fw_started)
			qla2x00_abort_isp_cleanup(vha);
	} else {
		/* Stop currently executing firmware. */
		qla2x00_try_to_stop_firmware(vha);
	}

	/* Disable timer */
	if (vha->timer_active)
		qla2x00_stop_timer(vha);

	/* Turn adapter off line */
	vha->flags.online = 0;

	/* turn-off interrupts on the card */
	if (ha->interrupts_on) {
		vha->flags.init_done = 0;
		ha->isp_ops->disable_intrs(ha);
	}

	qla2x00_free_irqs(vha);

	qla2x00_free_fw_dump(ha);

	pci_disable_device(pdev);
	ql_log(ql_log_info, vha, 0xfffe,
	    "Adapter shutdown successfully.\n");
}

/* Deletes all the virtual ports for a given ha */
static void
qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
{
	scsi_qla_host_t *vha;
	unsigned long flags;

	mutex_lock(&ha->vport_lock);
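	/*
	 * Tear the vports down one at a time: take a reference on the
	 * Scsi_Host and drop the locks so fc_vport_terminate() can sleep
	 * without the vha disappearing underneath us.
	 */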
	while (ha->cur_vport_count) {
		spin_lock_irqsave(&ha->vport_slock, flags);

		BUG_ON(base_vha->list.next == &ha->vp_list);
		/* This assumes first entry in ha->vp_list is always base vha */
		vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
		scsi_host_get(vha->host);

		spin_unlock_irqrestore(&ha->vport_slock, flags);
		mutex_unlock(&ha->vport_lock);

		qla_nvme_delete(vha);

		fc_vport_terminate(vha->fc_vport);
		scsi_host_put(vha->host);

		mutex_lock(&ha->vport_lock);
	}
	mutex_unlock(&ha->vport_lock);
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2013-10-30 07:38:15 +00:00
|
|
|
/* Stops all deferred work threads */
|
|
|
|
static void
|
|
|
|
qla2x00_destroy_deferred_work(struct qla_hw_data *ha)
|
|
|
|
{
|
2012-08-22 18:21:03 +00:00
|
|
|
/* Cancel all work and destroy DPC workqueues */
|
|
|
|
if (ha->dpc_lp_wq) {
|
|
|
|
cancel_work_sync(&ha->idc_aen);
|
|
|
|
destroy_workqueue(ha->dpc_lp_wq);
|
|
|
|
ha->dpc_lp_wq = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ha->dpc_hp_wq) {
|
|
|
|
cancel_work_sync(&ha->nic_core_reset);
|
|
|
|
cancel_work_sync(&ha->idc_state_handler);
|
|
|
|
cancel_work_sync(&ha->nic_core_unrecoverable);
|
|
|
|
destroy_workqueue(ha->dpc_hp_wq);
|
|
|
|
ha->dpc_hp_wq = NULL;
|
|
|
|
}
|
|
|
|
|
2009-03-24 16:08:05 +00:00
|
|
|
/* Kill the kernel thread for this host */
|
|
|
|
if (ha->dpc_thread) {
|
|
|
|
struct task_struct *t = ha->dpc_thread;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* qla2xxx_wake_dpc checks for ->dpc_thread
|
|
|
|
* so we need to zero it out.
|
|
|
|
*/
|
|
|
|
ha->dpc_thread = NULL;
|
|
|
|
kthread_stop(t);
|
|
|
|
}
|
2013-10-30 07:38:15 +00:00
|
|
|
}
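
The ordering above matters: queued work is cancelled before its workqueue is destroyed, and ->dpc_thread is cleared before kthread_stop() so that qla2xxx_wake_dpc() cannot race with the exiting thread. A hedged sketch of the same ordering, with invented names rather than the driver's structures:

/* Sketch: cancel queued work first, destroy the queue, then NULL the
 * thread pointer before stopping it so concurrent wake-up helpers see
 * the thread as already gone. */
#include <linux/kthread.h>
#include <linux/workqueue.h>

struct my_ctx {
	struct workqueue_struct *wq;
	struct work_struct event_work;	/* queued on wq */
	struct task_struct *worker;	/* woken via a helper that checks this */
};

static void my_destroy_deferred_work(struct my_ctx *ctx)
{
	if (ctx->wq) {
		cancel_work_sync(&ctx->event_work);	/* wait for handler */
		destroy_workqueue(ctx->wq);
		ctx->wq = NULL;
	}

	if (ctx->worker) {
		struct task_struct *t = ctx->worker;

		ctx->worker = NULL;	/* wake helper checks this pointer */
		kthread_stop(t);	/* blocks until the thread exits */
	}
}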
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2013-10-30 07:38:15 +00:00
|
|
|
static void
|
|
|
|
qla2x00_unmap_iobases(struct qla_hw_data *ha)
|
|
|
|
{
|
2010-04-13 00:59:55 +00:00
|
|
|
if (IS_QLA82XX(ha)) {
|
2010-05-28 22:08:15 +00:00
|
|
|
|
2014-02-26 09:15:06 +00:00
|
|
|
iounmap((device_reg_t *)ha->nx_pcibase);
|
2010-04-13 00:59:55 +00:00
|
|
|
if (!ql2xdbwr)
|
2014-02-26 09:15:06 +00:00
|
|
|
iounmap((device_reg_t *)ha->nxdb_wr_ptr);
|
2010-04-13 00:59:55 +00:00
|
|
|
} else {
|
|
|
|
if (ha->iobase)
|
|
|
|
iounmap(ha->iobase);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2013-03-28 12:21:23 +00:00
|
|
|
if (ha->cregbase)
|
|
|
|
iounmap(ha->cregbase);
|
|
|
|
|
2010-04-13 00:59:55 +00:00
|
|
|
if (ha->mqiobase)
|
|
|
|
iounmap(ha->mqiobase);
|
2012-02-09 19:15:34 +00:00
|
|
|
|
2019-03-12 18:08:13 +00:00
|
|
|
if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
|
|
|
|
ha->msixbase)
|
2012-02-09 19:15:34 +00:00
|
|
|
iounmap(ha->msixbase);
|
2010-04-13 00:59:55 +00:00
|
|
|
}
|
2013-10-30 07:38:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2014-08-26 21:10:41 +00:00
|
|
|
qla2x00_clear_drv_active(struct qla_hw_data *ha)
|
2013-10-30 07:38:15 +00:00
|
|
|
{
|
|
|
|
if (IS_QLA8044(ha)) {
|
|
|
|
qla8044_idc_lock(ha);
|
2013-11-07 07:54:56 +00:00
|
|
|
qla8044_clear_drv_active(ha);
|
2013-10-30 07:38:15 +00:00
|
|
|
qla8044_idc_unlock(ha);
|
|
|
|
} else if (IS_QLA82XX(ha)) {
|
|
|
|
qla82xx_idc_lock(ha);
|
|
|
|
qla82xx_clear_drv_active(ha);
|
|
|
|
qla82xx_idc_unlock(ha);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
qla2x00_remove_one(struct pci_dev *pdev)
|
|
|
|
{
|
|
|
|
scsi_qla_host_t *base_vha;
|
|
|
|
struct qla_hw_data *ha;
|
|
|
|
|
2014-08-26 21:12:14 +00:00
|
|
|
base_vha = pci_get_drvdata(pdev);
|
|
|
|
ha = base_vha->hw;
|
2018-07-18 21:29:53 +00:00
|
|
|
ql_log(ql_log_info, base_vha, 0xb079,
|
|
|
|
"Removing driver\n");
|
2019-09-12 18:09:14 +00:00
|
|
|
__qla_set_remove_flag(base_vha);
|
2014-08-26 21:12:14 +00:00
|
|
|
cancel_work_sync(&ha->board_disable);
|
|
|
|
|
2013-10-30 07:38:15 +00:00
|
|
|
/*
|
2014-08-26 21:12:14 +00:00
|
|
|
* If the PCI device is disabled then there was a PCI-disconnect and
|
|
|
|
* qla2x00_disable_board_on_pci_error has taken care of most of the
|
|
|
|
* resources.
|
2013-10-30 07:38:15 +00:00
|
|
|
*/
|
2014-08-26 21:12:14 +00:00
|
|
|
if (!atomic_read(&pdev->enable_cnt)) {
|
2017-01-20 06:28:00 +00:00
|
|
|
dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
|
|
|
|
base_vha->gnl.l, base_vha->gnl.ldma);
|
2019-08-14 14:24:41 +00:00
|
|
|
base_vha->gnl.l = NULL;
|
2014-08-26 21:12:14 +00:00
|
|
|
scsi_host_put(base_vha->host);
|
|
|
|
kfree(ha);
|
|
|
|
pci_set_drvdata(pdev, NULL);
|
2013-10-30 07:38:15 +00:00
|
|
|
return;
|
2014-08-26 21:12:14 +00:00
|
|
|
}
|
2014-04-11 20:54:38 +00:00
|
|
|
qla2x00_wait_for_hba_ready(base_vha);
|
|
|
|
|
2020-04-21 20:46:20 +00:00
|
|
|
/*
|
|
|
|
* if UNLOADING flag is already set, then continue unload,
|
|
|
|
* where it was set first.
|
|
|
|
*/
|
|
|
|
if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
|
|
|
|
return;
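
The test_and_set_bit() above makes the unload path idempotent: whichever caller atomically sets UNLOADING first performs the teardown, and any later caller simply returns. A small self-contained sketch of that gate (invented names, not the driver's API):

/* Idempotent-teardown gate, sketched with invented names: the first
 * caller to atomically set the flag wins and runs the teardown once. */
#include <linux/bitops.h>

#define MY_UNLOADING_BIT 0

static unsigned long my_flags;

static void my_remove(void)
{
	if (test_and_set_bit(MY_UNLOADING_BIT, &my_flags))
		return;		/* someone else is already unloading */

	/* ... one-time teardown work goes here ... */
}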
|
|
|
|
|
2019-03-12 18:08:13 +00:00
|
|
|
if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
|
|
|
|
IS_QLA28XX(ha)) {
|
2018-07-18 21:29:53 +00:00
|
|
|
if (ha->flags.fw_started)
|
|
|
|
qla2x00_abort_isp_cleanup(base_vha);
|
|
|
|
} else if (!IS_QLAFX00(ha)) {
|
|
|
|
if (IS_QLA8031(ha)) {
|
|
|
|
ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
|
|
|
|
"Clearing fcoe driver presence.\n");
|
|
|
|
if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
|
|
|
|
ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
|
|
|
|
"Error while clearing DRV-Presence.\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
qla2x00_try_to_stop_firmware(base_vha);
|
|
|
|
}
|
|
|
|
|
2018-01-23 19:05:21 +00:00
|
|
|
qla2x00_wait_for_sess_deletion(base_vha);
|
|
|
|
|
2017-06-21 20:48:43 +00:00
|
|
|
qla_nvme_delete(base_vha);
|
|
|
|
|
2017-01-20 06:28:00 +00:00
|
|
|
dma_free_coherent(&ha->pdev->dev,
|
|
|
|
base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
|
2013-10-30 07:38:15 +00:00
|
|
|
|
2019-08-14 14:24:41 +00:00
|
|
|
base_vha->gnl.l = NULL;
|
2021-06-24 05:25:58 +00:00
|
|
|
qla_enode_stop(base_vha);
|
2021-06-24 05:26:03 +00:00
|
|
|
qla_edb_stop(base_vha);
|
2019-08-14 14:24:41 +00:00
|
|
|
|
2017-12-28 20:33:26 +00:00
|
|
|
vfree(base_vha->scan.l);
|
|
|
|
|
2013-10-30 07:38:15 +00:00
|
|
|
if (IS_QLAFX00(ha))
|
|
|
|
qlafx00_driver_shutdown(base_vha, 20);
|
|
|
|
|
|
|
|
qla2x00_delete_all_vps(ha, base_vha);
|
|
|
|
|
|
|
|
qla2x00_dfs_remove(base_vha);
|
|
|
|
|
|
|
|
qla84xx_put_chip(base_vha);
|
|
|
|
|
|
|
|
/* Disable timer */
|
|
|
|
if (base_vha->timer_active)
|
|
|
|
qla2x00_stop_timer(base_vha);
|
|
|
|
|
|
|
|
base_vha->flags.online = 0;
|
|
|
|
|
2015-12-17 19:56:56 +00:00
|
|
|
/* free DMA memory */
|
|
|
|
if (ha->exlogin_buf)
|
|
|
|
qla2x00_free_exlogin_buffer(ha);
|
|
|
|
|
2015-12-17 19:56:57 +00:00
|
|
|
/* free DMA memory */
|
|
|
|
if (ha->exchoffld_buf)
|
|
|
|
qla2x00_free_exchoffld_buffer(ha);
|
|
|
|
|
2013-10-30 07:38:15 +00:00
|
|
|
qla2x00_destroy_deferred_work(ha);
|
|
|
|
|
|
|
|
qlt_remove_target(ha, base_vha);
|
|
|
|
|
|
|
|
qla2x00_free_sysfs_attr(base_vha, true);
|
|
|
|
|
|
|
|
fc_remove_host(base_vha->host);
|
|
|
|
|
|
|
|
scsi_remove_host(base_vha->host);
|
|
|
|
|
|
|
|
qla2x00_free_device(base_vha);
|
|
|
|
|
2014-08-26 21:10:41 +00:00
|
|
|
qla2x00_clear_drv_active(ha);
|
2013-10-30 07:38:15 +00:00
|
|
|
|
2014-09-25 09:16:51 +00:00
|
|
|
scsi_host_put(base_vha->host);
|
|
|
|
|
2013-10-30 07:38:15 +00:00
|
|
|
qla2x00_unmap_iobases(ha);
|
2008-12-10 00:45:39 +00:00
|
|
|
|
2008-11-06 18:40:51 +00:00
|
|
|
pci_release_selected_regions(ha->pdev, ha->bars);
|
|
|
|
kfree(ha);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2010-01-12 21:02:46 +00:00
|
|
|
pci_disable_pcie_error_reporting(pdev);
|
|
|
|
|
2007-03-27 22:49:49 +00:00
|
|
|
pci_disable_device(pdev);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2020-02-12 21:44:24 +00:00
|
|
|
static inline void
|
|
|
|
qla24xx_free_purex_list(struct purex_list *list)
|
|
|
|
{
|
|
|
|
struct list_head *item, *next;
|
|
|
|
ulong flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&list->lock, flags);
|
|
|
|
list_for_each_safe(item, next, &list->head) {
|
|
|
|
list_del(item);
|
|
|
|
kfree(list_entry(item, struct purex_item, list));
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&list->lock, flags);
|
|
|
|
}
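
Draining the list this way, with the safe iterator so entries can be unlinked and freed while walking, and with the list's spinlock held throughout, is a standard kernel idiom; a minimal sketch with invented types:

/* Sketch of the same drain idiom (my_item/my_list are invented names):
 * the _safe iterator caches the next pointer, so list_del() + kfree()
 * of the current entry is legal inside the loop. */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_item {
	struct list_head list;
	/* payload ... */
};

struct my_list {
	spinlock_t lock;
	struct list_head head;
};

static void my_free_list(struct my_list *l)
{
	struct my_item *item, *next;
	unsigned long flags;

	spin_lock_irqsave(&l->lock, flags);
	list_for_each_entry_safe(item, next, &l->head, list) {
		list_del(&item->list);
		kfree(item);	/* kfree() does not sleep, safe under the lock */
	}
	spin_unlock_irqrestore(&l->lock, flags);
}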
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
static void
|
2008-11-06 18:40:51 +00:00
|
|
|
qla2x00_free_device(scsi_qla_host_t *vha)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2008-11-06 18:40:51 +00:00
|
|
|
struct qla_hw_data *ha = vha->hw;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2009-12-16 05:29:46 +00:00
|
|
|
qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
|
|
|
|
|
|
|
|
/* Disable timer */
|
|
|
|
if (vha->timer_active)
|
|
|
|
qla2x00_stop_timer(vha);
|
|
|
|
|
2009-04-07 05:33:40 +00:00
|
|
|
qla25xx_delete_queues(vha);
|
2009-12-16 05:29:46 +00:00
|
|
|
vha->flags.online = 0;
|
|
|
|
|
2005-08-27 02:10:20 +00:00
|
|
|
/* turn-off interrupts on the card */
|
2010-04-13 00:59:55 +00:00
|
|
|
if (ha->interrupts_on) {
|
|
|
|
vha->flags.init_done = 0;
|
2007-07-19 22:06:00 +00:00
|
|
|
ha->isp_ops->disable_intrs(ha);
|
2010-04-13 00:59:55 +00:00
|
|
|
}
|
2005-08-27 02:10:20 +00:00
|
|
|
|
2016-12-12 22:40:09 +00:00
|
|
|
qla2x00_free_fcports(vha);
|
|
|
|
|
2008-11-06 18:40:51 +00:00
|
|
|
qla2x00_free_irqs(vha);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2016-12-12 22:40:09 +00:00
|
|
|
/* Flush the work queue and remove it */
|
|
|
|
if (ha->wq) {
|
|
|
|
flush_workqueue(ha->wq);
|
|
|
|
destroy_workqueue(ha->wq);
|
|
|
|
ha->wq = NULL;
|
|
|
|
}
|
|
|
|
|
2010-07-23 10:28:30 +00:00
|
|
|
|
2020-02-12 21:44:24 +00:00
|
|
|
qla24xx_free_purex_list(&vha->purex_list);
|
|
|
|
|
2008-11-06 18:40:51 +00:00
|
|
|
qla2x00_mem_free(ha);
|
2008-12-10 00:45:39 +00:00
|
|
|
|
2011-08-16 18:31:44 +00:00
|
|
|
qla82xx_md_free(vha);
|
|
|
|
|
2021-06-24 05:26:00 +00:00
|
|
|
qla_edif_sadb_release_free_pool(ha);
|
|
|
|
qla_edif_sadb_release(ha);
|
|
|
|
|
2008-12-10 00:45:39 +00:00
|
|
|
qla2x00_free_queues(ha);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2010-07-23 10:28:30 +00:00
|
|
|
void qla2x00_free_fcports(struct scsi_qla_host *vha)
|
|
|
|
{
|
|
|
|
fc_port_t *fcport, *tfcport;
|
|
|
|
|
2019-04-02 21:24:29 +00:00
|
|
|
list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list)
|
|
|
|
qla2x00_free_fcport(fcport);
|
2010-07-23 10:28:30 +00:00
|
|
|
}
|
|
|
|
|
2006-01-20 22:53:13 +00:00
|
|
|
static inline void
|
2019-12-17 22:06:04 +00:00
|
|
|
qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport)
|
2006-01-20 22:53:13 +00:00
|
|
|
{
|
2019-12-17 22:06:04 +00:00
|
|
|
int now;
|
2006-01-20 22:53:13 +00:00
|
|
|
|
|
|
|
if (!fcport->rport)
|
|
|
|
return;
|
|
|
|
|
2019-12-17 22:06:04 +00:00
|
|
|
if (fcport->rport) {
|
|
|
|
ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
|
|
|
|
"%s %8phN. rport %p roles %x\n",
|
|
|
|
__func__, fcport->port_name, fcport->rport,
|
|
|
|
fcport->rport->roles);
|
|
|
|
fc_remote_port_delete(fcport->rport);
|
2012-05-15 18:34:28 +00:00
|
|
|
}
|
2019-12-17 22:06:04 +00:00
|
|
|
qlt_do_generation_tick(vha, &now);
|
2006-01-20 22:53:13 +00:00
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* qla2x00_mark_device_lost Updates fcport state when device goes offline.
|
|
|
|
*
|
|
|
|
* Input: ha = adapter block pointer. fcport = port structure pointer.
|
|
|
|
*
|
|
|
|
* Return: None.
|
|
|
|
*
|
|
|
|
* Context:
|
|
|
|
*/
|
2008-11-06 18:40:51 +00:00
|
|
|
void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
|
2019-12-17 22:06:04 +00:00
|
|
|
int do_login)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2013-03-28 12:21:23 +00:00
|
|
|
if (IS_QLAFX00(vha->hw)) {
|
|
|
|
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
|
2019-12-17 22:06:04 +00:00
|
|
|
qla2x00_schedule_rport_del(vha, fcport);
|
2013-03-28 12:21:23 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2007-07-05 20:16:51 +00:00
|
|
|
if (atomic_read(&fcport->state) == FCS_ONLINE &&
|
2012-05-15 18:34:20 +00:00
|
|
|
vha->vp_idx == fcport->vha->vp_idx) {
|
2011-03-30 18:46:32 +00:00
|
|
|
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
|
2019-12-17 22:06:04 +00:00
|
|
|
qla2x00_schedule_rport_del(vha, fcport);
|
2008-11-06 18:40:51 +00:00
|
|
|
}
|
2021-06-24 05:26:02 +00:00
|
|
|
|
2005-07-06 17:32:07 +00:00
|
|
|
/*
|
2005-04-16 22:20:36 +00:00
|
|
|
* We may need to retry the login, so don't change the state of the
|
|
|
|
* port but do the retries.
|
|
|
|
*/
|
|
|
|
if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
|
2011-03-30 18:46:32 +00:00
|
|
|
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
if (!do_login)
|
|
|
|
return;
|
|
|
|
|
2015-08-04 17:38:02 +00:00
|
|
|
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2019-12-17 22:06:04 +00:00
|
|
|
qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
fc_port_t *fcport;
|
|
|
|
|
2017-06-02 16:12:01 +00:00
|
|
|
ql_dbg(ql_dbg_disc, vha, 0x20f1,
|
|
|
|
"Mark all dev lost\n");
|
2017-01-20 06:28:00 +00:00
|
|
|
|
2008-11-06 18:40:51 +00:00
|
|
|
list_for_each_entry(fcport, &vha->vp_fcports, list) {
|
2021-08-10 04:37:10 +00:00
|
|
|
if (fcport->loop_id != FC_NO_LOOP_ID &&
|
|
|
|
(fcport->flags & FCF_FCP2_DEVICE) &&
|
|
|
|
fcport->port_type == FCT_TARGET &&
|
|
|
|
!qla2x00_reset_active(vha)) {
|
|
|
|
ql_dbg(ql_dbg_disc, vha, 0x211a,
|
|
|
|
"Delaying session delete for FCP2 flags 0x%x port_type = 0x%x port_id=%06x %phC",
|
|
|
|
fcport->flags, fcport->port_type,
|
|
|
|
fcport->d_id.b24, fcport->port_name);
|
|
|
|
continue;
|
|
|
|
}
|
2017-01-20 06:28:00 +00:00
|
|
|
fcport->scan_state = 0;
|
2017-12-28 20:33:43 +00:00
|
|
|
qlt_schedule_sess_for_deletion(fcport);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-17 21:44:12 +00:00
|
|
|
static void qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (IS_FWI2_CAPABLE(ha))
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
|
|
|
|
set_bit(i, ha->loop_id_map);
|
|
|
|
set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
|
|
|
|
set_bit(BROADCAST, ha->loop_id_map);
|
|
|
|
}
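
Pre-setting the reserved bits in loop_id_map means a later allocator only has to search for a clear bit. A hedged sketch of such a bitmap allocator (invented names and sizes; the driver's real allocation helpers live elsewhere and take the adapter lock):

/* Bitmap-based ID allocator sketch (my_* names are invented).  Reserved
 * slots are pre-set, so find_first_zero_bit() never hands them out.
 * A real allocator would serialize these helpers with a lock. */
#include <linux/bitmap.h>
#include <linux/bitops.h>

#define MY_MAX_IDS	2048
#define MY_RESERVED_ID	0	/* pretend slot 0 is reserved */

static DECLARE_BITMAP(my_id_map, MY_MAX_IDS);

static void my_init_ids(void)
{
	bitmap_zero(my_id_map, MY_MAX_IDS);
	set_bit(MY_RESERVED_ID, my_id_map);
}

static int my_alloc_id(void)
{
	unsigned long id = find_first_zero_bit(my_id_map, MY_MAX_IDS);

	if (id >= MY_MAX_IDS)
		return -1;		/* map exhausted */
	set_bit(id, my_id_map);
	return id;
}

static void my_free_id(int id)
{
	clear_bit(id, my_id_map);
}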
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* qla2x00_mem_alloc
|
|
|
|
* Allocates adapter memory.
|
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* 0 = success.
|
2008-01-31 20:33:48 +00:00
|
|
|
* !0 = failure.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2008-01-31 20:33:48 +00:00
|
|
|
static int
|
2008-12-10 00:45:39 +00:00
|
|
|
qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
|
|
|
|
struct req_que **req, struct rsp_que **rsp)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
char name[16];
|
2021-06-24 05:25:59 +00:00
|
|
|
int rc;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-01-31 20:33:48 +00:00
|
|
|
ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
|
2008-11-06 18:40:51 +00:00
|
|
|
&ha->init_cb_dma, GFP_KERNEL);
|
2008-01-31 20:33:48 +00:00
|
|
|
if (!ha->init_cb)
|
2008-11-06 18:40:51 +00:00
|
|
|
goto fail;
|
2008-01-31 20:33:48 +00:00
|
|
|
|
2021-06-24 05:25:59 +00:00
|
|
|
rc = btree_init32(&ha->host_map);
|
|
|
|
if (rc)
|
2012-05-15 18:34:28 +00:00
|
|
|
goto fail_free_init_cb;
|
|
|
|
|
2021-06-24 05:25:59 +00:00
|
|
|
if (qlt_mem_alloc(ha) < 0)
|
|
|
|
goto fail_free_btree;
|
|
|
|
|
2012-02-09 19:15:57 +00:00
|
|
|
ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
|
|
|
|
qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
|
2008-11-06 18:40:51 +00:00
|
|
|
if (!ha->gid_list)
|
2012-05-15 18:34:28 +00:00
|
|
|
goto fail_free_tgt_mem;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-01-31 20:33:48 +00:00
|
|
|
ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
|
|
|
|
if (!ha->srb_mempool)
|
2008-11-06 18:40:51 +00:00
|
|
|
goto fail_free_gid_list;
|
2008-01-31 20:33:48 +00:00
|
|
|
|
2021-06-24 05:26:04 +00:00
|
|
|
if (IS_P3P_TYPE(ha) || IS_QLA27XX(ha) || (ql2xsecenable && IS_QLA28XX(ha))) {
|
2010-04-13 00:59:55 +00:00
|
|
|
/* Allocate cache for CT6 Ctx. */
|
|
|
|
if (!ctx_cachep) {
|
|
|
|
ctx_cachep = kmem_cache_create("qla2xxx_ctx",
|
|
|
|
sizeof(struct ct6_dsd), 0,
|
|
|
|
SLAB_HWCACHE_ALIGN, NULL);
|
|
|
|
if (!ctx_cachep)
|
2016-12-24 02:06:10 +00:00
|
|
|
goto fail_free_srb_mempool;
|
2010-04-13 00:59:55 +00:00
|
|
|
}
|
|
|
|
ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
|
|
|
|
ctx_cachep);
|
|
|
|
if (!ha->ctx_mempool)
|
|
|
|
goto fail_free_srb_mempool;
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021,
|
|
|
|
"ctx_cachep=%p ctx_mempool=%p.\n",
|
|
|
|
ctx_cachep, ha->ctx_mempool);
|
2010-04-13 00:59:55 +00:00
|
|
|
}
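
The pattern above, a slab cache created once module-wide and a mempool built on top of it per adapter, guarantees that a minimum number of objects stays allocatable even under memory pressure. A hedged sketch of that lifecycle with invented names:

/* Slab cache + mempool lifecycle sketch; my_obj and MY_MIN_RESERVE are
 * invented.  The mempool keeps MY_MIN_RESERVE objects in reserve so
 * allocations from it can still succeed when the slab path fails. */
#include <linux/mempool.h>
#include <linux/slab.h>

struct my_obj {
	int payload;
};

#define MY_MIN_RESERVE 16

static struct kmem_cache *my_cache;	/* shared, created once */

static mempool_t *my_create_pool(void)
{
	if (!my_cache) {
		my_cache = kmem_cache_create("my_obj_cache",
					     sizeof(struct my_obj), 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!my_cache)
			return NULL;
	}
	return mempool_create_slab_pool(MY_MIN_RESERVE, my_cache);
}

static void my_use_pool(mempool_t *pool)
{
	struct my_obj *obj = mempool_alloc(pool, GFP_KERNEL);

	if (obj) {
		obj->payload = 42;
		mempool_free(obj, pool);
	}
}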
|
|
|
|
|
2008-01-31 20:33:48 +00:00
|
|
|
/* Get memory for cached NVRAM */
|
|
|
|
ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
|
|
|
|
if (!ha->nvram)
|
2010-04-13 00:59:55 +00:00
|
|
|
goto fail_free_ctx_mempool;
|
2008-01-31 20:33:48 +00:00
|
|
|
|
2008-11-06 18:40:51 +00:00
|
|
|
snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
|
|
|
|
ha->pdev->device);
|
|
|
|
ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
|
|
|
|
DMA_POOL_SIZE, 8, 0);
|
|
|
|
if (!ha->s_dma_pool)
|
|
|
|
goto fail_free_nvram;
|
|
|
|
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022,
|
|
|
|
"init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
|
|
|
|
ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
|
|
|
|
|
2021-06-24 05:26:04 +00:00
|
|
|
if (IS_P3P_TYPE(ha) || ql2xenabledif || (IS_QLA28XX(ha) && ql2xsecenable)) {
|
2010-04-13 00:59:55 +00:00
|
|
|
ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
|
|
|
|
DSD_LIST_DMA_POOL_SIZE, 8, 0);
|
|
|
|
if (!ha->dl_dma_pool) {
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_log_pci(ql_log_fatal, ha->pdev, 0x0023,
|
|
|
|
"Failed to allocate memory for dl_dma_pool.\n");
|
2010-04-13 00:59:55 +00:00
|
|
|
goto fail_s_dma_pool;
|
|
|
|
}
|
|
|
|
|
|
|
|
ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
|
|
|
|
FCP_CMND_DMA_POOL_SIZE, 8, 0);
|
|
|
|
if (!ha->fcp_cmnd_dma_pool) {
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_log_pci(ql_log_fatal, ha->pdev, 0x0024,
|
|
|
|
"Failed to allocate memory for fcp_cmnd_dma_pool.\n");
|
2010-04-13 00:59:55 +00:00
|
|
|
goto fail_dl_dma_pool;
|
|
|
|
}
|
2018-12-21 17:33:45 +00:00
|
|
|
|
|
|
|
if (ql2xenabledif) {
|
|
|
|
u64 bufsize = DIF_BUNDLING_DMA_POOL_SIZE;
|
|
|
|
struct dsd_dma *dsd, *nxt;
|
|
|
|
uint i;
|
|
|
|
/* Create a DMA pool of buffers for DIF bundling */
|
|
|
|
ha->dif_bundl_pool = dma_pool_create(name,
|
|
|
|
&ha->pdev->dev, DIF_BUNDLING_DMA_POOL_SIZE, 8, 0);
|
|
|
|
if (!ha->dif_bundl_pool) {
|
|
|
|
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024,
|
|
|
|
"%s: failed create dif_bundl_pool\n",
|
|
|
|
__func__);
|
|
|
|
goto fail_dif_bundl_dma_pool;
|
|
|
|
}
|
|
|
|
|
|
|
|
INIT_LIST_HEAD(&ha->pool.good.head);
|
|
|
|
INIT_LIST_HEAD(&ha->pool.unusable.head);
|
|
|
|
ha->pool.good.count = 0;
|
|
|
|
ha->pool.unusable.count = 0;
|
|
|
|
for (i = 0; i < 128; i++) {
|
|
|
|
dsd = kzalloc(sizeof(*dsd), GFP_ATOMIC);
|
|
|
|
if (!dsd) {
|
|
|
|
ql_dbg_pci(ql_dbg_init, ha->pdev,
|
|
|
|
0xe0ee, "%s: failed alloc dsd\n",
|
|
|
|
__func__);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
ha->dif_bundle_kallocs++;
|
|
|
|
|
|
|
|
dsd->dsd_addr = dma_pool_alloc(
|
|
|
|
ha->dif_bundl_pool, GFP_ATOMIC,
|
|
|
|
&dsd->dsd_list_dma);
|
|
|
|
if (!dsd->dsd_addr) {
|
|
|
|
ql_dbg_pci(ql_dbg_init, ha->pdev,
|
|
|
|
0xe0ee,
|
|
|
|
"%s: failed alloc ->dsd_addr\n",
|
|
|
|
__func__);
|
|
|
|
kfree(dsd);
|
|
|
|
ha->dif_bundle_kallocs--;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
ha->dif_bundle_dma_allocs++;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* if DMA buffer crosses 4G boundary,
|
|
|
|
* put it on bad list
|
|
|
|
*/
|
|
|
|
if (MSD(dsd->dsd_list_dma) ^
|
|
|
|
MSD(dsd->dsd_list_dma + bufsize)) {
|
|
|
|
list_add_tail(&dsd->list,
|
|
|
|
&ha->pool.unusable.head);
|
|
|
|
ha->pool.unusable.count++;
|
|
|
|
} else {
|
|
|
|
list_add_tail(&dsd->list,
|
|
|
|
&ha->pool.good.head);
|
|
|
|
ha->pool.good.count++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* return the good ones back to the pool */
|
|
|
|
list_for_each_entry_safe(dsd, nxt,
|
|
|
|
&ha->pool.good.head, list) {
|
|
|
|
list_del(&dsd->list);
|
|
|
|
dma_pool_free(ha->dif_bundl_pool,
|
|
|
|
dsd->dsd_addr, dsd->dsd_list_dma);
|
|
|
|
ha->dif_bundle_dma_allocs--;
|
|
|
|
kfree(dsd);
|
|
|
|
ha->dif_bundle_kallocs--;
|
|
|
|
}
|
|
|
|
|
|
|
|
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024,
|
|
|
|
"%s: dif dma pool (good=%u unusable=%u)\n",
|
|
|
|
__func__, ha->pool.good.count,
|
|
|
|
ha->pool.unusable.count);
|
|
|
|
}
|
|
|
|
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
|
2018-12-21 17:33:45 +00:00
|
|
|
"dl_dma_pool=%p fcp_cmnd_dma_pool=%p dif_bundl_pool=%p.\n",
|
|
|
|
ha->dl_dma_pool, ha->fcp_cmnd_dma_pool,
|
|
|
|
ha->dif_bundl_pool);
|
2010-04-13 00:59:55 +00:00
|
|
|
}
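
The "crosses 4G boundary" test above compares the most significant dword of the buffer's start address with that of start + size: if the upper 32 bits differ, the allocation straddles a 4 GiB boundary and is parked on the unusable list instead of being returned to the pool. A standalone plain-C illustration of that check (not driver code; the driver uses its MSD() macro, open-coded here):

/* A buffer is flagged when the upper 32 bits of (addr) and (addr + len)
 * differ, mirroring the MSD() comparison above. */
#include <stdint.h>
#include <stdio.h>

static int crosses_4g_boundary(uint64_t dma_addr, uint64_t len)
{
	return (uint32_t)(dma_addr >> 32) !=
	       (uint32_t)((dma_addr + len) >> 32);
}

int main(void)
{
	/* 0xFFFFF000 + 0x2000 reaches 0x100001000: upper dword changes. */
	printf("%d\n", crosses_4g_boundary(0xFFFFF000ULL, 0x2000));	/* 1 */
	printf("%d\n", crosses_4g_boundary(0x100000000ULL, 0x2000));	/* 0 */
	return 0;
}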
|
|
|
|
|
2008-01-31 20:33:48 +00:00
|
|
|
/* Allocate memory for SNS commands */
|
|
|
|
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
|
2008-11-06 18:40:51 +00:00
|
|
|
/* Get consistent memory allocated for SNS commands */
|
2008-01-31 20:33:48 +00:00
|
|
|
ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
|
2008-11-06 18:40:51 +00:00
|
|
|
sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
|
2008-01-31 20:33:48 +00:00
|
|
|
if (!ha->sns_cmd)
|
2008-11-06 18:40:51 +00:00
|
|
|
goto fail_dma_pool;
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
|
2011-11-18 17:03:06 +00:00
|
|
|
"sns_cmd: %p.\n", ha->sns_cmd);
|
2008-01-31 20:33:48 +00:00
|
|
|
} else {
|
2008-11-06 18:40:51 +00:00
|
|
|
/* Get consistent memory allocated for MS IOCB */
|
2008-01-31 20:33:48 +00:00
|
|
|
ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
|
2008-11-06 18:40:51 +00:00
|
|
|
&ha->ms_iocb_dma);
|
2008-01-31 20:33:48 +00:00
|
|
|
if (!ha->ms_iocb)
|
2008-11-06 18:40:51 +00:00
|
|
|
goto fail_dma_pool;
|
|
|
|
/* Get consistent memory allocated for CT SNS commands */
|
2008-01-31 20:33:48 +00:00
|
|
|
ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
|
2008-11-06 18:40:51 +00:00
|
|
|
sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
|
2008-01-31 20:33:48 +00:00
|
|
|
if (!ha->ct_sns)
|
|
|
|
goto fail_free_ms_iocb;
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027,
|
|
|
|
"ms_iocb=%p ct_sns=%p.\n",
|
|
|
|
ha->ms_iocb, ha->ct_sns);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2008-11-06 18:40:51 +00:00
|
|
|
/* Allocate memory for request ring */
|
2008-12-10 00:45:39 +00:00
|
|
|
*req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
|
|
|
|
if (!*req) {
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_log_pci(ql_log_fatal, ha->pdev, 0x0028,
|
|
|
|
"Failed to allocate memory for req.\n");
|
2008-11-06 18:40:51 +00:00
|
|
|
goto fail_req;
|
|
|
|
}
|
2008-12-10 00:45:39 +00:00
|
|
|
(*req)->length = req_len;
|
|
|
|
(*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
|
|
|
|
((*req)->length + 1) * sizeof(request_t),
|
|
|
|
&(*req)->dma, GFP_KERNEL);
|
|
|
|
if (!(*req)->ring) {
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_log_pci(ql_log_fatal, ha->pdev, 0x0029,
|
|
|
|
"Failed to allocate memory for req_ring.\n");
|
2008-11-06 18:40:51 +00:00
|
|
|
goto fail_req_ring;
|
|
|
|
}
|
|
|
|
/* Allocate memory for response ring */
|
2008-12-10 00:45:39 +00:00
|
|
|
*rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
|
|
|
|
if (!*rsp) {
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_log_pci(ql_log_fatal, ha->pdev, 0x002a,
|
|
|
|
"Failed to allocate memory for rsp.\n");
|
2008-11-06 18:40:51 +00:00
|
|
|
goto fail_rsp;
|
|
|
|
}
|
2008-12-10 00:45:39 +00:00
|
|
|
(*rsp)->hw = ha;
|
|
|
|
(*rsp)->length = rsp_len;
|
|
|
|
(*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
|
|
|
|
((*rsp)->length + 1) * sizeof(response_t),
|
|
|
|
&(*rsp)->dma, GFP_KERNEL);
|
|
|
|
if (!(*rsp)->ring) {
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_log_pci(ql_log_fatal, ha->pdev, 0x002b,
|
|
|
|
"Failed to allocate memory for rsp_ring.\n");
|
2008-11-06 18:40:51 +00:00
|
|
|
goto fail_rsp_ring;
|
|
|
|
}
|
2008-12-10 00:45:39 +00:00
|
|
|
(*req)->rsp = *rsp;
|
|
|
|
(*rsp)->req = *req;
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c,
|
|
|
|
"req=%p req->length=%d req->ring=%p rsp=%p "
|
|
|
|
"rsp->length=%d rsp->ring=%p.\n",
|
|
|
|
*req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length,
|
|
|
|
(*rsp)->ring);
|
2008-12-10 00:45:39 +00:00
|
|
|
/* Allocate memory for NVRAM data for vports */
|
|
|
|
if (ha->nvram_npiv_size) {
|
treewide: kzalloc() -> kcalloc()
The kzalloc() function has a 2-factor argument form, kcalloc(). This
patch replaces cases of:
kzalloc(a * b, gfp)
with:
kcalloc(a * b, gfp)
as well as handling cases of:
kzalloc(a * b * c, gfp)
with:
kzalloc(array3_size(a, b, c), gfp)
as it's slightly less ugly than:
kzalloc_array(array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
kzalloc(4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kzalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kzalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kzalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kzalloc
+ kcalloc
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kzalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kzalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kzalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kzalloc(C1 * C2 * C3, ...)
|
kzalloc(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kzalloc(sizeof(THING) * C2, ...)
|
kzalloc(sizeof(TYPE) * C2, ...)
|
kzalloc(C1 * C2 * C3, ...)
|
kzalloc(C1 * C2, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- (E1) * E2
+ E1, E2
, ...)
|
- kzalloc
+ kcalloc
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kzalloc
+ kcalloc
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-12 21:03:40 +00:00
|
|
|
ha->npiv_info = kcalloc(ha->nvram_npiv_size,
|
|
|
|
sizeof(struct qla_npiv_entry),
|
|
|
|
GFP_KERNEL);
|
2008-12-10 00:45:39 +00:00
|
|
|
if (!ha->npiv_info) {
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_log_pci(ql_log_fatal, ha->pdev, 0x002d,
|
|
|
|
"Failed to allocate memory for npiv_info.\n");
|
2008-12-10 00:45:39 +00:00
|
|
|
goto fail_npiv_info;
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
ha->npiv_info = NULL;
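
The kcalloc() conversion described in the commit message above is not only cosmetic: kcalloc(n, size, gfp) returns NULL when n * size would overflow, whereas an open-coded kzalloc(n * size, gfp) silently wraps and under-allocates. A small standalone illustration of the wrap-around the two-factor form guards against (not driver code):

/* Shows how an unchecked count * size product can wrap.  kcalloc()
 * performs this multiplication with an overflow check and returns NULL
 * instead of under-allocating. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	size_t count = SIZE_MAX / 16 + 2;	/* pathological element count */
	size_t size  = 16;			/* element size in bytes */
	size_t bytes = count * size;		/* wraps around */

	printf("requested %zu elements of %zu bytes -> %zu bytes allocated\n",
	       count, size, bytes);		/* prints a tiny number (16) */
	return 0;
}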
|
2008-01-31 20:33:48 +00:00
|
|
|
|
2009-03-24 16:08:01 +00:00
|
|
|
/* Get consistent memory allocated for EX-INIT-CB. */
|
2019-03-12 18:08:13 +00:00
|
|
|
if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
|
|
|
|
IS_QLA28XX(ha)) {
|
2009-03-24 16:08:01 +00:00
|
|
|
ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
|
|
|
|
&ha->ex_init_cb_dma);
|
|
|
|
if (!ha->ex_init_cb)
|
|
|
|
goto fail_ex_init_cb;
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e,
|
|
|
|
"ex_init_cb=%p.\n", ha->ex_init_cb);
|
2009-03-24 16:08:01 +00:00
|
|
|
}
|
|
|
|
|
2020-06-30 10:22:29 +00:00
|
|
|
/* Get consistent memory allocated for Special Features-CB. */
|
|
|
|
if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
|
2021-03-13 02:41:15 +00:00
|
|
|
ha->sf_init_cb = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL,
|
2020-06-30 10:22:29 +00:00
|
|
|
&ha->sf_init_cb_dma);
|
|
|
|
if (!ha->sf_init_cb)
|
|
|
|
goto fail_sf_init_cb;
|
|
|
|
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0199,
|
|
|
|
"sf_init_cb=%p.\n", ha->sf_init_cb);
|
|
|
|
}
|
|
|
|
|
2010-04-13 00:59:55 +00:00
|
|
|
INIT_LIST_HEAD(&ha->gbl_dsd_list);
|
|
|
|
|
2010-05-04 22:01:26 +00:00
|
|
|
/* Get consistent memory allocated for Async Port-Database. */
|
|
|
|
if (!IS_FWI2_CAPABLE(ha)) {
|
|
|
|
ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
|
|
|
|
&ha->async_pd_dma);
|
|
|
|
if (!ha->async_pd)
|
|
|
|
goto fail_async_pd;
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f,
|
|
|
|
"async_pd=%p.\n", ha->async_pd);
|
2010-05-04 22:01:26 +00:00
|
|
|
}
|
|
|
|
|
2008-11-06 18:40:51 +00:00
|
|
|
INIT_LIST_HEAD(&ha->vp_list);
|
2012-08-22 18:21:00 +00:00
|
|
|
|
|
|
|
/* Allocate memory for our loop_id bitmap */
|
2018-06-12 21:03:40 +00:00
|
|
|
ha->loop_id_map = kcalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE),
|
|
|
|
sizeof(long),
|
|
|
|
GFP_KERNEL);
|
2012-08-22 18:21:00 +00:00
|
|
|
if (!ha->loop_id_map)
|
2016-12-24 02:06:10 +00:00
|
|
|
goto fail_loop_id_map;
|
2012-08-22 18:21:00 +00:00
|
|
|
else {
|
|
|
|
qla2x00_set_reserved_loop_ids(ha);
|
|
|
|
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
|
2014-01-21 07:00:10 +00:00
|
|
|
"loop_id_map=%p.\n", ha->loop_id_map);
|
2012-08-22 18:21:00 +00:00
|
|
|
}
|
|
|
|
|
2017-08-23 22:05:07 +00:00
|
|
|
ha->sfp_data = dma_alloc_coherent(&ha->pdev->dev,
|
|
|
|
SFP_DEV_SIZE, &ha->sfp_data_dma, GFP_KERNEL);
|
|
|
|
if (!ha->sfp_data) {
|
|
|
|
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
|
|
|
|
"Unable to allocate memory for SFP read-data.\n");
|
|
|
|
goto fail_sfp_data;
|
|
|
|
}
|
|
|
|
|
2019-03-12 18:08:22 +00:00
|
|
|
ha->flt = dma_alloc_coherent(&ha->pdev->dev,
|
|
|
|
sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, &ha->flt_dma,
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (!ha->flt) {
|
|
|
|
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
|
|
|
|
"Unable to allocate memory for FLT.\n");
|
|
|
|
goto fail_flt_buffer;
|
|
|
|
}
|
|
|
|
|
2021-06-24 05:25:58 +00:00
|
|
|
/* allocate the purex dma pool */
|
|
|
|
ha->purex_dma_pool = dma_pool_create(name, &ha->pdev->dev,
|
|
|
|
MAX_PAYLOAD, 8, 0);
|
|
|
|
|
|
|
|
if (!ha->purex_dma_pool) {
|
|
|
|
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
|
|
|
|
"Unable to allocate purex_dma_pool.\n");
|
|
|
|
goto fail_flt;
|
|
|
|
}
|
|
|
|
|
|
|
|
ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16;
|
|
|
|
ha->elsrej.c = dma_alloc_coherent(&ha->pdev->dev,
|
|
|
|
ha->elsrej.size, &ha->elsrej.cdma, GFP_KERNEL);
|
|
|
|
|
|
|
|
if (!ha->elsrej.c) {
|
|
|
|
ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff,
|
|
|
|
"Alloc failed for els reject cmd.\n");
|
|
|
|
goto fail_elsrej;
|
|
|
|
}
|
|
|
|
ha->elsrej.c->er_cmd = ELS_LS_RJT;
|
2021-08-17 05:13:05 +00:00
|
|
|
ha->elsrej.c->er_reason = ELS_RJT_LOGIC;
|
2021-06-24 05:25:58 +00:00
|
|
|
ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA;
|
2014-01-21 07:00:10 +00:00
|
|
|
return 0;
|
2008-11-06 18:40:51 +00:00
|
|
|
|
2021-06-24 05:25:58 +00:00
|
|
|
fail_elsrej:
|
|
|
|
dma_pool_destroy(ha->purex_dma_pool);
|
|
|
|
fail_flt:
|
|
|
|
dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
|
|
|
|
ha->flt, ha->flt_dma);
|
|
|
|
|
2019-03-12 18:08:22 +00:00
|
|
|
fail_flt_buffer:
|
|
|
|
dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
|
|
|
|
ha->sfp_data, ha->sfp_data_dma);
|
2017-08-23 22:05:07 +00:00
|
|
|
fail_sfp_data:
|
|
|
|
kfree(ha->loop_id_map);
|
2016-12-24 02:06:10 +00:00
|
|
|
fail_loop_id_map:
|
|
|
|
dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
|
2010-05-04 22:01:26 +00:00
|
|
|
fail_async_pd:
|
2020-06-30 10:22:29 +00:00
|
|
|
dma_pool_free(ha->s_dma_pool, ha->sf_init_cb, ha->sf_init_cb_dma);
|
|
|
|
fail_sf_init_cb:
|
2010-05-04 22:01:26 +00:00
|
|
|
dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
|
2009-03-24 16:08:01 +00:00
|
|
|
fail_ex_init_cb:
|
|
|
|
kfree(ha->npiv_info);
|
2008-12-10 00:45:39 +00:00
|
|
|
fail_npiv_info:
|
|
|
|
dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
|
|
|
|
sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
|
|
|
|
(*rsp)->ring = NULL;
|
|
|
|
(*rsp)->dma = 0;
|
2008-11-06 18:40:51 +00:00
|
|
|
fail_rsp_ring:
|
2008-12-10 00:45:39 +00:00
|
|
|
kfree(*rsp);
|
scsi: qla2xxx: Fix small memory leak in qla2x00_probe_one on probe failure
The code that fixes the crashes in the following commit introduced a small
memory leak:
commit 6a2cf8d3663e ("scsi: qla2xxx: Fix crashes in qla2x00_probe_one on probe failure")
Fixing this requires a bit of reworking, which I've explained. Also provide
some code cleanup.
There is a small window in qla2x00_probe_one where if qla2x00_alloc_queues
fails, we end up never freeing req and rsp and leak 0xc0 and 0xc8 bytes
respectively (the sizes of req and rsp).
I originally put in checks to test for this condition which were based on
the incorrect assumption that if ha->rsp_q_map and ha->req_q_map were
allocated, then rsp and req were allocated as well. This is incorrect.
There is a window between these allocations:
ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
goto probe_hw_failed;
[if successful, both rsp and req allocated]
base_vha = qla2x00_create_host(sht, ha);
goto probe_hw_failed;
ret = qla2x00_request_irqs(ha, rsp);
goto probe_failed;
if (qla2x00_alloc_queues(ha, req, rsp)) {
goto probe_failed;
[if successful, now ha->rsp_q_map and ha->req_q_map allocated]
To simplify this, we should just set req and rsp to NULL after we free
them. Sounds simple enough? The problem is that req and rsp are pointers
defined in the qla2x00_probe_one and they are not always passed by reference
to the routines that free them.
Here are paths which can free req and rsp:
PATH 1:
qla2x00_probe_one
ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
[req and rsp are passed by reference, but if this fails, we currently
do not NULL out req and rsp. Easily fixed]
PATH 2:
qla2x00_probe_one
failing in qla2x00_request_irqs or qla2x00_alloc_queues
probe_failed:
qla2x00_free_device(base_vha);
qla2x00_free_req_que(ha, req)
qla2x00_free_rsp_que(ha, rsp)
PATH 3:
qla2x00_probe_one:
failing in qla2x00_mem_alloc or qla2x00_create_host
probe_hw_failed:
qla2x00_free_req_que(ha, req)
qla2x00_free_rsp_que(ha, rsp)
PATH 1: This should currently work, but it doesn't because rsp and rsp are
not set to NULL in qla2x00_mem_alloc. Easily remedied.
PATH 2: req and rsp aren't passed in at all to qla2x00_free_device but are
derived from ha->req_q_map[0] and ha->rsp_q_map[0]. These are only set up if
qla2x00_alloc_queues succeeds.
In qla2x00_free_queues, we are protected from crashing if these don't exist
because req_qid_map and rsp_qid_map are only set on their allocation. We are
guarded in this way:
for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
if (!test_bit(cnt, ha->req_qid_map))
continue;
PATH 3: This works. We haven't freed req or rsp yet (or they were never
allocated if qla2x00_mem_alloc failed), so we'll attempt to free them here.
To summarize, there are a few small changes to make this work correctly and
(and for some cleanup):
1) (For PATH 1) Set *rsp and *req to NULL in case of failure in
qla2x00_mem_alloc so these are correctly set to NULL back in
qla2x00_probe_one
2) After jumping to probe_failed: and calling qla2x00_free_device,
explicitly set rsp and req to NULL so further calls with these pointers do
not crash, i.e. the free queue calls in the probe_hw_failed section we fall
through to.
3) Fix return code check in the call to qla2x00_alloc_queues. We currently
drop the return code on the floor. The probe fails but the caller of the
probe doesn't have an error code, so it attaches to pci. This can result in
a crash on module shutdown.
4) Remove unnecessary NULL checks in qla2x00_free_req_que,
qla2x00_free_rsp_que, and the egregious NULL checks before kfrees and vfrees
in qla2x00_mem_free.
I tested this out running a scenario where the card breaks at various times
during initialization. I made sure I forced every error exit path in
qla2x00_probe_one.
Cc: <stable@vger.kernel.org> # v4.16
Fixes: 6a2cf8d3663e ("scsi: qla2xxx: Fix crashes in qla2x00_probe_one on probe failure")
Signed-off-by: Bill Kuzeja <william.kuzeja@stratus.com>
Acked-by: Himanshu Madhani <himanshu.madhani@cavium.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2018-03-23 14:37:25 +00:00
|
|
|
*rsp = NULL;
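
As the commit message above explains, the caller-visible pointer is cleared right after the memory behind it is released, so later cleanup paths reached through other goto labels can test or re-free it harmlessly. A minimal sketch of that convention with an invented helper:

/* Hypothetical helper illustrating the free-then-NULL convention used
 * in this unwind path: clearing through a pointer-to-pointer lets every
 * later error path call it again safely. */
#include <linux/slab.h>

struct my_que { int dummy; };

static void my_unwind(struct my_que **req)
{
	kfree(*req);		/* kfree(NULL) is a no-op */
	*req = NULL;		/* callers further up see "already freed" */
}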
|
2008-11-06 18:40:51 +00:00
|
|
|
fail_rsp:
|
2008-12-10 00:45:39 +00:00
|
|
|
dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
|
|
|
|
sizeof(request_t), (*req)->ring, (*req)->dma);
|
|
|
|
(*req)->ring = NULL;
|
|
|
|
(*req)->dma = 0;
|
2008-11-06 18:40:51 +00:00
|
|
|
fail_req_ring:
|
2008-12-10 00:45:39 +00:00
|
|
|
kfree(*req);
|
2018-03-23 14:37:25 +00:00
|
|
|
*req = NULL;
|
2008-11-06 18:40:51 +00:00
|
|
|
fail_req:
|
|
|
|
dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
|
|
|
|
ha->ct_sns, ha->ct_sns_dma);
|
|
|
|
ha->ct_sns = NULL;
|
|
|
|
ha->ct_sns_dma = 0;
|
2008-01-31 20:33:48 +00:00
|
|
|
fail_free_ms_iocb:
|
|
|
|
dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
|
|
|
|
ha->ms_iocb = NULL;
|
|
|
|
ha->ms_iocb_dma = 0;
|
2016-12-24 02:06:10 +00:00
|
|
|
|
|
|
|
if (ha->sns_cmd)
|
|
|
|
dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
|
|
|
|
ha->sns_cmd, ha->sns_cmd_dma);
|
2008-11-06 18:40:51 +00:00
|
|
|
fail_dma_pool:
|
2018-12-21 17:33:45 +00:00
|
|
|
if (ql2xenabledif) {
|
|
|
|
struct dsd_dma *dsd, *nxt;
|
|
|
|
|
|
|
|
list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
|
|
|
|
list) {
|
|
|
|
list_del(&dsd->list);
|
|
|
|
dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
|
|
|
|
dsd->dsd_list_dma);
|
|
|
|
ha->dif_bundle_dma_allocs--;
|
|
|
|
kfree(dsd);
|
|
|
|
ha->dif_bundle_kallocs--;
|
|
|
|
ha->pool.unusable.count--;
|
|
|
|
}
|
|
|
|
dma_pool_destroy(ha->dif_bundl_pool);
|
|
|
|
ha->dif_bundl_pool = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
fail_dif_bundl_dma_pool:
|
2010-05-04 22:01:30 +00:00
|
|
|
if (IS_QLA82XX(ha) || ql2xenabledif) {
|
2010-04-13 00:59:55 +00:00
|
|
|
dma_pool_destroy(ha->fcp_cmnd_dma_pool);
|
|
|
|
ha->fcp_cmnd_dma_pool = NULL;
|
|
|
|
}
|
|
|
|
fail_dl_dma_pool:
|
2010-05-04 22:01:30 +00:00
|
|
|
if (IS_QLA82XX(ha) || ql2xenabledif) {
|
2010-04-13 00:59:55 +00:00
|
|
|
dma_pool_destroy(ha->dl_dma_pool);
|
|
|
|
ha->dl_dma_pool = NULL;
|
|
|
|
}
|
|
|
|
fail_s_dma_pool:
|
2008-11-06 18:40:51 +00:00
|
|
|
dma_pool_destroy(ha->s_dma_pool);
|
|
|
|
ha->s_dma_pool = NULL;
|
2008-01-31 20:33:48 +00:00
|
|
|
fail_free_nvram:
|
|
|
|
kfree(ha->nvram);
|
|
|
|
ha->nvram = NULL;
|
2010-04-13 00:59:55 +00:00
|
|
|
fail_free_ctx_mempool:
|
2018-12-02 20:52:11 +00:00
|
|
|
mempool_destroy(ha->ctx_mempool);
|
2010-04-13 00:59:55 +00:00
|
|
|
ha->ctx_mempool = NULL;
|
2008-01-31 20:33:48 +00:00
|
|
|
fail_free_srb_mempool:
|
2018-12-02 20:52:11 +00:00
|
|
|
mempool_destroy(ha->srb_mempool);
|
2008-01-31 20:33:48 +00:00
|
|
|
ha->srb_mempool = NULL;
|
|
|
|
fail_free_gid_list:
|
2012-02-09 19:15:57 +00:00
|
|
|
dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
|
|
|
|
ha->gid_list,
|
2008-11-06 18:40:51 +00:00
|
|
|
ha->gid_list_dma);
|
2008-01-31 20:33:48 +00:00
|
|
|
ha->gid_list = NULL;
|
|
|
|
ha->gid_list_dma = 0;
|
2012-05-15 18:34:28 +00:00
|
|
|
fail_free_tgt_mem:
|
|
|
|
qlt_mem_free(ha);
|
2021-06-24 05:25:59 +00:00
|
|
|
fail_free_btree:
|
|
|
|
btree_destroy32(&ha->host_map);
|
2008-11-06 18:40:51 +00:00
|
|
|
fail_free_init_cb:
|
|
|
|
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
|
|
|
|
ha->init_cb_dma);
|
|
|
|
ha->init_cb = NULL;
|
|
|
|
ha->init_cb_dma = 0;
|
2008-01-31 20:33:48 +00:00
|
|
|
fail:
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_log(ql_log_fatal, NULL, 0x0030,
|
|
|
|
"Memory allocation failure.\n");
|
2008-01-31 20:33:48 +00:00
|
|
|
return -ENOMEM;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2015-12-17 19:56:56 +00:00
|
|
|
int
|
|
|
|
qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
|
|
|
|
{
|
|
|
|
int rval;
|
2020-09-04 04:51:21 +00:00
|
|
|
uint16_t size, max_cnt;
|
|
|
|
uint32_t temp;
|
2015-12-17 19:56:56 +00:00
|
|
|
struct qla_hw_data *ha = vha->hw;
|
|
|
|
|
|
|
|
/* Return if we don't need to allocate any extended logins */
|
2020-09-04 04:51:21 +00:00
|
|
|
if (ql2xexlogins <= MAX_FIBRE_DEVICES_2400)
|
2015-12-17 19:56:56 +00:00
|
|
|
return QLA_SUCCESS;
|
|
|
|
|
2017-06-02 16:12:03 +00:00
|
|
|
if (!IS_EXLOGIN_OFFLD_CAPABLE(ha))
|
|
|
|
return QLA_SUCCESS;
|
|
|
|
|
2015-12-17 19:56:56 +00:00
|
|
|
ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins);
|
|
|
|
max_cnt = 0;
|
|
|
|
rval = qla_get_exlogin_status(vha, &size, &max_cnt);
|
|
|
|
if (rval != QLA_SUCCESS) {
|
|
|
|
ql_log_pci(ql_log_fatal, ha->pdev, 0xd029,
|
|
|
|
"Failed to get exlogin status.\n");
|
|
|
|
return rval;
|
|
|
|
}
|
|
|
|
|
|
|
|
temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins;
|
2017-06-02 16:12:03 +00:00
|
|
|
temp *= size;
|
|
|
|
|
|
|
|
if (temp != ha->exlogin_size) {
|
|
|
|
qla2x00_free_exlogin_buffer(ha);
|
|
|
|
ha->exlogin_size = temp;
|
|
|
|
|
|
|
|
ql_log(ql_log_info, vha, 0xd024,
|
|
|
|
"EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n",
|
|
|
|
max_cnt, size, temp);
|
|
|
|
|
|
|
|
ql_log(ql_log_info, vha, 0xd025,
|
|
|
|
"EXLOGIN: requested size=0x%x\n", ha->exlogin_size);
|
|
|
|
|
|
|
|
/* Get consistent memory for extended logins */
|
|
|
|
ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev,
|
|
|
|
ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL);
|
|
|
|
if (!ha->exlogin_buf) {
|
|
|
|
ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a,
|
2015-12-17 19:56:56 +00:00
|
|
|
"Failed to allocate memory for exlogin_buf_dma.\n");
|
2017-06-02 16:12:03 +00:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2015-12-17 19:56:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Now configure the dma buffer */
|
|
|
|
rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma);
|
|
|
|
if (rval) {
|
2017-06-02 16:12:01 +00:00
|
|
|
ql_log(ql_log_fatal, vha, 0xd033,
|
2015-12-17 19:56:56 +00:00
|
|
|
"Setup extended login buffer ****FAILED****.\n");
|
|
|
|
qla2x00_free_exlogin_buffer(ha);
|
|
|
|
}
|
|
|
|
|
|
|
|
return rval;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* qla2x00_free_exlogin_buffer
|
|
|
|
*
|
|
|
|
* Input:
|
|
|
|
* ha = adapter block pointer
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
qla2x00_free_exlogin_buffer(struct qla_hw_data *ha)
|
|
|
|
{
|
|
|
|
if (ha->exlogin_buf) {
|
|
|
|
dma_free_coherent(&ha->pdev->dev, ha->exlogin_size,
|
|
|
|
ha->exlogin_buf, ha->exlogin_buf_dma);
|
|
|
|
ha->exlogin_buf = NULL;
|
|
|
|
ha->exlogin_size = 0;
|
|
|
|
}
|
|
|
|
}
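
The extended-login buffer follows the usual coherent-DMA lifecycle: dma_alloc_coherent() hands back both a CPU virtual address and a bus address, and the same size/virtual/bus triple must be passed to dma_free_coherent() on teardown. A hedged sketch of that pairing with invented field names:

/* Coherent DMA buffer lifecycle sketch (my_buf and its fields are
 * invented): allocate once, remember size plus both addresses, free
 * with exactly the same triple. */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct my_buf {
	void		*cpu_addr;	/* kernel virtual address */
	dma_addr_t	 dma_addr;	/* address the device uses */
	size_t		 size;
};

static int my_buf_alloc(struct device *dev, struct my_buf *b, size_t size)
{
	b->cpu_addr = dma_alloc_coherent(dev, size, &b->dma_addr, GFP_KERNEL);
	if (!b->cpu_addr)
		return -ENOMEM;
	b->size = size;
	return 0;
}

static void my_buf_free(struct device *dev, struct my_buf *b)
{
	if (b->cpu_addr) {
		dma_free_coherent(dev, b->size, b->cpu_addr, b->dma_addr);
		b->cpu_addr = NULL;
		b->size = 0;
	}
}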
|
|
|
|
|
2017-06-02 16:12:03 +00:00
|
|
|
static void
|
|
|
|
qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
|
|
|
|
{
|
|
|
|
u32 temp;
|
2018-09-11 17:18:18 +00:00
|
|
|
struct init_cb_81xx *icb = (struct init_cb_81xx *)&vha->hw->init_cb;
|
2017-06-02 16:12:03 +00:00
|
|
|
*ret_cnt = FW_DEF_EXCHANGES_CNT;
|
|
|
|
|
2017-12-28 20:33:12 +00:00
|
|
|
if (max_cnt > vha->hw->max_exchg)
|
|
|
|
max_cnt = vha->hw->max_exchg;
|
|
|
|
|
2017-06-02 16:12:03 +00:00
|
|
|
if (qla_ini_mode_enabled(vha)) {
|
2018-09-11 17:18:18 +00:00
|
|
|
if (vha->ql2xiniexchg > max_cnt)
|
|
|
|
vha->ql2xiniexchg = max_cnt;
|
|
|
|
|
|
|
|
if (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
|
|
|
|
*ret_cnt = vha->ql2xiniexchg;
|
2017-06-02 16:12:03 +00:00
|
|
|
|
|
|
|
} else if (qla_tgt_mode_enabled(vha)) {
|
2018-09-11 17:18:18 +00:00
|
|
|
if (vha->ql2xexchoffld > max_cnt) {
|
|
|
|
vha->ql2xexchoffld = max_cnt;
|
|
|
|
icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
|
|
|
|
}
|
2017-06-02 16:12:03 +00:00
|
|
|
|
2018-09-11 17:18:18 +00:00
|
|
|
if (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT)
|
|
|
|
*ret_cnt = vha->ql2xexchoffld;
|
2017-06-02 16:12:03 +00:00
|
|
|
} else if (qla_dual_mode_enabled(vha)) {
|
2018-09-11 17:18:18 +00:00
|
|
|
temp = vha->ql2xiniexchg + vha->ql2xexchoffld;
|
2017-06-02 16:12:03 +00:00
|
|
|
if (temp > max_cnt) {
|
2018-09-11 17:18:18 +00:00
|
|
|
vha->ql2xiniexchg -= (temp - max_cnt)/2;
|
|
|
|
vha->ql2xexchoffld -= (((temp - max_cnt)/2) + 1);
|
2017-06-02 16:12:03 +00:00
|
|
|
temp = max_cnt;
|
2018-09-11 17:18:18 +00:00
|
|
|
icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
|
2017-06-02 16:12:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (temp > FW_DEF_EXCHANGES_CNT)
|
|
|
|
*ret_cnt = temp;
|
|
|
|
}
|
|
|
|
}
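
In dual mode the requested initiator and target exchange counts are summed and, when the sum exceeds the firmware-reported maximum, the overshoot is split roughly in half between the two; the extra +1 on the target side keeps the reduced sum at or below the limit even when the overshoot is odd. A worked example of that arithmetic (standalone C, not driver code):

/* Standalone rework of the dual-mode split above. */
#include <stdio.h>

int main(void)
{
	unsigned int ini = 1024, offld = 2048;	/* requested counts */
	unsigned int max_cnt = 3000;		/* firmware-reported limit */
	unsigned int temp = ini + offld;

	if (temp > max_cnt) {
		ini   -= (temp - max_cnt) / 2;
		offld -= (temp - max_cnt) / 2 + 1;
		temp = max_cnt;
	}
	/* 3072 > 3000: overshoot 72 -> ini 988, offld 2011, sum 2999 */
	printf("ini=%u offld=%u sum=%u (max=%u)\n",
	       ini, offld, ini + offld, max_cnt);
	return 0;
}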
|
|
|
|
|
2015-12-17 19:56:57 +00:00
|
|
|
int
qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
{
	int rval;
	u16 size, max_cnt;
	u32 actual_cnt, totsz;
	struct qla_hw_data *ha = vha->hw;

	if (!ha->flags.exchoffld_enabled)
		return QLA_SUCCESS;

	if (!IS_EXCHG_OFFLD_CAPABLE(ha))
		return QLA_SUCCESS;

	max_cnt = 0;
	rval = qla_get_exchoffld_status(vha, &size, &max_cnt);
	if (rval != QLA_SUCCESS) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0xd012,
		    "Failed to get exlogin status.\n");
		return rval;
	}

	qla2x00_number_of_exch(vha, &actual_cnt, max_cnt);
	ql_log(ql_log_info, vha, 0xd014,
	    "Actual exchange offload count: %d.\n", actual_cnt);

	totsz = actual_cnt * size;

	if (totsz != ha->exchoffld_size) {
		qla2x00_free_exchoffld_buffer(ha);
		if (actual_cnt <= FW_DEF_EXCHANGES_CNT) {
			ha->exchoffld_size = 0;
			ha->flags.exchoffld_enabled = 0;
			return QLA_SUCCESS;
		}

		ha->exchoffld_size = totsz;

		ql_log(ql_log_info, vha, 0xd016,
		    "Exchange offload: max_count=%d, actual count=%d entry sz=0x%x, total sz=0x%x\n",
		    max_cnt, actual_cnt, size, totsz);

		ql_log(ql_log_info, vha, 0xd017,
		    "Exchange Buffers requested size = 0x%x\n",
		    ha->exchoffld_size);

		/* Get consistent memory for extended logins */
		ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev,
		    ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
		if (!ha->exchoffld_buf) {
			ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
			    "Failed to allocate memory for Exchange Offload.\n");

			if (ha->max_exchg >
			    (FW_DEF_EXCHANGES_CNT + REDUCE_EXCHANGES_CNT)) {
				ha->max_exchg -= REDUCE_EXCHANGES_CNT;
			} else if (ha->max_exchg >
			    (FW_DEF_EXCHANGES_CNT + 512)) {
				ha->max_exchg -= 512;
			} else {
				ha->flags.exchoffld_enabled = 0;
				ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
				    "Disabling Exchange offload due to lack of memory\n");
			}
			ha->exchoffld_size = 0;

			return -ENOMEM;
		}
	} else if (!ha->exchoffld_buf || (actual_cnt <= FW_DEF_EXCHANGES_CNT)) {
		/* pathological case */
		qla2x00_free_exchoffld_buffer(ha);
		ha->exchoffld_size = 0;
		ha->flags.exchoffld_enabled = 0;
		ql_log(ql_log_info, vha, 0xd016,
		    "Exchange offload not enable: offld size=%d, actual count=%d entry sz=0x%x, total sz=0x%x.\n",
		    ha->exchoffld_size, actual_cnt, size, totsz);
		return 0;
	}

	/* Now configure the dma buffer */
	rval = qla_set_exchoffld_mem_cfg(vha);
	if (rval) {
		ql_log(ql_log_fatal, vha, 0xd02e,
		    "Setup exchange offload buffer ****FAILED****.\n");
		qla2x00_free_exchoffld_buffer(ha);
	} else {
		/* re-adjust number of target exchange */
		struct init_cb_81xx *icb = (struct init_cb_81xx *)ha->init_cb;

		if (qla_ini_mode_enabled(vha))
			icb->exchange_count = 0;
		else
			icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
	}

	return rval;
}
/*
 * qla2x00_free_exchoffld_buffer
 *
 * Input:
 *	ha = adapter block pointer
 */
void
qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha)
{
	if (ha->exchoffld_buf) {
		dma_free_coherent(&ha->pdev->dev, ha->exchoffld_size,
		    ha->exchoffld_buf, ha->exchoffld_buf_dma);
		ha->exchoffld_buf = NULL;
		ha->exchoffld_size = 0;
	}
}
/*
* qla2x00_free_fw_dump
*	Frees fw dump stuff.
*
* Input:
*	ha = adapter block pointer
*/
static void
qla2x00_free_fw_dump(struct qla_hw_data *ha)
{
	struct fwdt *fwdt = ha->fwdt;
	uint j;

	if (ha->fce)
		dma_free_coherent(&ha->pdev->dev,
		    FCE_SIZE, ha->fce, ha->fce_dma);

	if (ha->eft)
		dma_free_coherent(&ha->pdev->dev,
		    EFT_SIZE, ha->eft, ha->eft_dma);

	vfree(ha->fw_dump);

	ha->fce = NULL;
	ha->fce_dma = 0;
	ha->flags.fce_enabled = 0;
	ha->eft = NULL;
	ha->eft_dma = 0;
	ha->fw_dumped = false;
	ha->fw_dump_cap_flags = 0;
	ha->fw_dump_reading = 0;
	ha->fw_dump = NULL;
	ha->fw_dump_len = 0;

	for (j = 0; j < 2; j++, fwdt++) {
		vfree(fwdt->template);
		fwdt->template = NULL;
		fwdt->length = 0;
	}
}
/*
* qla2x00_mem_free
*	Frees all adapter allocated memory.
*
* Input:
*	ha = adapter block pointer.
*/
static void
qla2x00_mem_free(struct qla_hw_data *ha)
{
	qla2x00_free_fw_dump(ha);

	if (ha->mctp_dump)
		dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
		    ha->mctp_dump_dma);
	ha->mctp_dump = NULL;

	mempool_destroy(ha->srb_mempool);
	ha->srb_mempool = NULL;

	if (ha->dcbx_tlv)
		dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
		    ha->dcbx_tlv, ha->dcbx_tlv_dma);
	ha->dcbx_tlv = NULL;

	if (ha->xgmac_data)
		dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
		    ha->xgmac_data, ha->xgmac_data_dma);
	ha->xgmac_data = NULL;

	if (ha->sns_cmd)
		dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
		    ha->sns_cmd, ha->sns_cmd_dma);
	ha->sns_cmd = NULL;
	ha->sns_cmd_dma = 0;

	if (ha->ct_sns)
		dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
		    ha->ct_sns, ha->ct_sns_dma);
	ha->ct_sns = NULL;
	ha->ct_sns_dma = 0;

	if (ha->sfp_data)
		dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data,
		    ha->sfp_data_dma);
	ha->sfp_data = NULL;

	if (ha->flt)
		dma_free_coherent(&ha->pdev->dev,
		    sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE,
		    ha->flt, ha->flt_dma);
	ha->flt = NULL;
	ha->flt_dma = 0;

	if (ha->ms_iocb)
		dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
	ha->ms_iocb = NULL;
	ha->ms_iocb_dma = 0;

	if (ha->sf_init_cb)
		dma_pool_free(ha->s_dma_pool,
		    ha->sf_init_cb, ha->sf_init_cb_dma);

	if (ha->ex_init_cb)
		dma_pool_free(ha->s_dma_pool,
		    ha->ex_init_cb, ha->ex_init_cb_dma);
	ha->ex_init_cb = NULL;
	ha->ex_init_cb_dma = 0;

	if (ha->async_pd)
		dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
	ha->async_pd = NULL;
	ha->async_pd_dma = 0;

	dma_pool_destroy(ha->s_dma_pool);
	ha->s_dma_pool = NULL;

	if (ha->gid_list)
		dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
		    ha->gid_list, ha->gid_list_dma);
	ha->gid_list = NULL;
	ha->gid_list_dma = 0;

	if (IS_QLA82XX(ha)) {
		if (!list_empty(&ha->gbl_dsd_list)) {
			struct dsd_dma *dsd_ptr, *tdsd_ptr;

			/* clean up allocated prev pool */
			list_for_each_entry_safe(dsd_ptr,
			    tdsd_ptr, &ha->gbl_dsd_list, list) {
				dma_pool_free(ha->dl_dma_pool,
				    dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
				list_del(&dsd_ptr->list);
				kfree(dsd_ptr);
			}
		}
	}

	dma_pool_destroy(ha->dl_dma_pool);
	ha->dl_dma_pool = NULL;

	dma_pool_destroy(ha->fcp_cmnd_dma_pool);
	ha->fcp_cmnd_dma_pool = NULL;

	mempool_destroy(ha->ctx_mempool);
	ha->ctx_mempool = NULL;

	if (ql2xenabledif && ha->dif_bundl_pool) {
		struct dsd_dma *dsd, *nxt;

		list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
		    list) {
			list_del(&dsd->list);
			dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
			    dsd->dsd_list_dma);
			ha->dif_bundle_dma_allocs--;
			kfree(dsd);
			ha->dif_bundle_kallocs--;
			ha->pool.unusable.count--;
		}
		list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) {
			list_del(&dsd->list);
			dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
			    dsd->dsd_list_dma);
			ha->dif_bundle_dma_allocs--;
			kfree(dsd);
			ha->dif_bundle_kallocs--;
		}
	}

	dma_pool_destroy(ha->dif_bundl_pool);
	ha->dif_bundl_pool = NULL;

	qlt_mem_free(ha);
	qla_remove_hostmap(ha);

	if (ha->init_cb)
		dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
		    ha->init_cb, ha->init_cb_dma);

	dma_pool_destroy(ha->purex_dma_pool);
	ha->purex_dma_pool = NULL;

	if (ha->elsrej.c) {
		dma_free_coherent(&ha->pdev->dev, ha->elsrej.size,
		    ha->elsrej.c, ha->elsrej.cdma);
		ha->elsrej.c = NULL;
	}

	ha->init_cb = NULL;
	ha->init_cb_dma = 0;

	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	kfree(ha->nvram);
	ha->nvram = NULL;
	kfree(ha->npiv_info);
	ha->npiv_info = NULL;
	kfree(ha->swl);
	ha->swl = NULL;
	kfree(ha->loop_id_map);
	ha->sf_init_cb = NULL;
	ha->sf_init_cb_dma = 0;
	ha->loop_id_map = NULL;
}

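/*
 * qla2x00_create_host
 *	Allocate a Scsi_Host and its scsi_qla_host_t private data, then
 *	initialize the per-host lists, locks, and the GNL/scan buffers.
 *	Returns NULL on any allocation failure.
 */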
struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
					  struct qla_hw_data *ha)
{
	struct Scsi_Host *host;
	struct scsi_qla_host *vha = NULL;

	host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
	if (!host) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0107,
		    "Failed to allocate host from the scsi layer, aborting.\n");
		return NULL;
	}

	/* Clear our data area */
	vha = shost_priv(host);
	memset(vha, 0, sizeof(scsi_qla_host_t));

	vha->host = host;
	vha->host_no = host->host_no;
	vha->hw = ha;

	vha->qlini_mode = ql2x_ini_mode;
	vha->ql2xexchoffld = ql2xexchoffld;
	vha->ql2xiniexchg = ql2xiniexchg;

	INIT_LIST_HEAD(&vha->vp_fcports);
	INIT_LIST_HEAD(&vha->work_list);
	INIT_LIST_HEAD(&vha->list);
	INIT_LIST_HEAD(&vha->qla_cmd_list);
	INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
	INIT_LIST_HEAD(&vha->logo_list);
	INIT_LIST_HEAD(&vha->plogi_ack_list);
	INIT_LIST_HEAD(&vha->qp_list);
	INIT_LIST_HEAD(&vha->gnl.fcports);
	INIT_LIST_HEAD(&vha->gpnid_list);
	INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn);

	INIT_LIST_HEAD(&vha->purex_list.head);
	spin_lock_init(&vha->purex_list.lock);

	spin_lock_init(&vha->work_lock);
	spin_lock_init(&vha->cmd_list_lock);
	init_waitqueue_head(&vha->fcport_waitQ);
	init_waitqueue_head(&vha->vref_waitq);
	qla_enode_init(vha);
	qla_edb_init(vha);

	vha->gnl.size = sizeof(struct get_name_list_extended) *
			(ha->max_loop_id + 1);
	vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev,
	    vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL);
	if (!vha->gnl.l) {
		ql_log(ql_log_fatal, vha, 0xd04a,
		    "Alloc failed for name list.\n");
		scsi_host_put(vha->host);
		return NULL;
	}

	/* todo: what about ext login? */
	vha->scan.size = ha->max_fibre_devices * sizeof(struct fab_scan_rp);
	vha->scan.l = vmalloc(vha->scan.size);
	if (!vha->scan.l) {
		ql_log(ql_log_fatal, vha, 0xd04a,
		    "Alloc failed for scan database.\n");
		dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
		    vha->gnl.l, vha->gnl.ldma);
		vha->gnl.l = NULL;
		scsi_host_put(vha->host);
		return NULL;
	}
	INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);

	sprintf(vha->host_str, "%s_%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
	ql_dbg(ql_dbg_init, vha, 0x0041,
	    "Allocated the host=%p hw=%p vha=%p dev_name=%s",
	    vha->host, vha->hw, vha,
	    dev_name(&(ha->pdev->dev)));

	return vha;
}
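/*
 * qla2x00_alloc_work
 *	Allocate a work event of the given type and mark the vha busy.
 *	Returns NULL if the driver is unloading or the allocation fails.
 */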
struct qla_work_evt *
qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
{
	struct qla_work_evt *e;
	uint8_t bail;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return NULL;

	QLA_VHA_MARK_BUSY(vha, bail);
	if (bail)
		return NULL;

	e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
	if (!e) {
		QLA_VHA_MARK_NOT_BUSY(vha);
		return NULL;
	}

	INIT_LIST_HEAD(&e->list);
	e->type = type;
	e->flags = QLA_EVT_FLAG_FREE;
	return e;
}
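/*
 * qla2x00_post_work
 *	Queue a work event on the vha work list and kick the iocb work
 *	function if it is not already scheduled.
 */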
int
qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	unsigned long flags;
	bool q = false;

	spin_lock_irqsave(&vha->work_lock, flags);
	list_add_tail(&e->list, &vha->work_list);

	if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
		q = true;

	spin_unlock_irqrestore(&vha->work_lock, flags);

	if (q)
		queue_work(vha->hw->wq, &vha->iocb_work);

	return QLA_SUCCESS;
}

int
qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
    u32 data)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.aen.code = code;
	e->u.aen.data = data;
	return qla2x00_post_work(vha, e);
}

int
qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
	if (!e)
		return QLA_FUNCTION_FAILED;

	memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
	return qla2x00_post_work(vha, e);
}
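/*
 * Generate the qla2x00_post_async_<name>_work() helpers used to queue an
 * asynchronous login/logout/ADISC/PRLO event for a remote port.
 */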
#define qla2x00_post_async_work(name, type)	\
int qla2x00_post_async_##name##_work(		\
    struct scsi_qla_host *vha,			\
    fc_port_t *fcport, uint16_t *data)		\
{						\
	struct qla_work_evt *e;			\
						\
	e = qla2x00_alloc_work(vha, type);	\
	if (!e)					\
		return QLA_FUNCTION_FAILED;	\
						\
	e->u.logio.fcport = fcport;		\
	if (data) {				\
		e->u.logio.data[0] = data[0];	\
		e->u.logio.data[1] = data[1];	\
	}					\
	fcport->flags |= FCF_ASYNC_ACTIVE;	\
	return qla2x00_post_work(vha, e);	\
}

qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);

int
qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.uevent.code = code;
	return qla2x00_post_work(vha, e);
}
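/*
 * qla2x00_uevent_emit
 *	Emit a KOBJ_CHANGE uevent (currently only FW_DUMP) on the PCI
 *	device so user space can react to the event.
 */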
static void
qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
{
	char event_string[40];
	char *envp[] = { event_string, NULL };

	switch (code) {
	case QLA_UEVENT_CODE_FW_DUMP:
		snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu",
		    vha->host_no);
		break;
	default:
		/* do nothing */
		break;
	}
	kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
}

int
qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode,
    uint32_t *data, int cnt)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_AENFX);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.aenfx.evtcode = evtcode;
	e->u.aenfx.count = cnt;
	memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt);
	return qla2x00_post_work(vha, e);
}
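/*
 * qla24xx_sched_upd_fcport
 *	Move the port to DSC_UPD_FCPORT (unless it is already there) and
 *	schedule its registration work on the system_unbound workqueue.
 */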
void qla24xx_sched_upd_fcport(fc_port_t *fcport)
{
	unsigned long flags;

	if (IS_SW_RESV_ADDR(fcport->d_id))
		return;

	spin_lock_irqsave(&fcport->vha->work_lock, flags);
	if (fcport->disc_state == DSC_UPD_FCPORT) {
		spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
		return;
	}
	fcport->jiffies_at_registration = jiffies;
	fcport->sec_since_registration = 0;
	fcport->next_disc_state = DSC_DELETED;
	qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
	spin_unlock_irqrestore(&fcport->vha->work_lock, flags);

	queue_work(system_unbound_wq, &fcport->reg_work);
}
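/*
 * qla24xx_create_new_sess
 *	Work handler for QLA_EVT_NEW_SESS: find or allocate the fc_port for
 *	the incoming login, link any pending PLOGI ACK, resolve N_Port ID
 *	conflicts, and kick off the login state machine.
 */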
static
void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	unsigned long flags;
	fc_port_t *fcport = NULL, *tfcp;
	struct qlt_plogi_ack_t *pla =
	    (struct qlt_plogi_ack_t *)e->u.new_sess.pla;
	uint8_t free_fcport = 0;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %d %8phC enter\n",
	    __func__, __LINE__, e->u.new_sess.port_name);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1);
	if (fcport) {
		fcport->d_id = e->u.new_sess.id;
		if (pla) {
			fcport->fw_login_state = DSC_LS_PLOGI_PEND;
			memcpy(fcport->node_name,
			    pla->iocb.u.isp24.u.plogi.node_name,
			    WWN_SIZE);
			qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN);
			/* we took an extra ref_count to prevent PLOGI ACK when
			 * fcport/sess has not been created.
			 */
			pla->ref_count--;
		}
	} else {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (fcport) {
			fcport->d_id = e->u.new_sess.id;
			fcport->flags |= FCF_FABRIC_DEVICE;
			fcport->fw_login_state = DSC_LS_PLOGI_PEND;
			fcport->tgt_short_link_down_cnt = 0;

			memcpy(fcport->port_name, e->u.new_sess.port_name,
			    WWN_SIZE);

			fcport->fc4_type = e->u.new_sess.fc4_type;
			if (NVME_PRIORITY(vha->hw, fcport))
				fcport->do_prli_nvme = 1;
			else
				fcport->do_prli_nvme = 0;

			if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) {
				fcport->dm_login_expire = jiffies +
					QLA_N2N_WAIT_TIME * HZ;
				fcport->fc4_type = FS_FC4TYPE_FCP;
				fcport->n2n_flag = 1;
				if (vha->flags.nvme_enabled)
					fcport->fc4_type |= FS_FC4TYPE_NVME;
			}

		} else {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %8phC mem alloc fail.\n",
			    __func__, e->u.new_sess.port_name);

			if (pla) {
				list_del(&pla->list);
				kmem_cache_free(qla_tgt_plogi_cachep, pla);
			}
			return;
		}

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		/* search again to make sure no one else got ahead */
		tfcp = qla2x00_find_fcport_by_wwpn(vha,
		    e->u.new_sess.port_name, 1);
		if (tfcp) {
			/* should rarily happen */
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %8phC found existing fcport b4 add. DS %d LS %d\n",
			    __func__, tfcp->port_name, tfcp->disc_state,
			    tfcp->fw_login_state);

			free_fcport = 1;
		} else {
			list_add_tail(&fcport->list, &vha->vp_fcports);

		}
		if (pla) {
			qlt_plogi_ack_link(vha, pla, fcport,
			    QLT_PLOGI_LINK_SAME_WWN);
			pla->ref_count--;
		}
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	if (fcport) {
		fcport->id_changed = 1;
		fcport->scan_state = QLA_FCPORT_FOUND;
		fcport->chip_reset = vha->hw->base_qpair->chip_reset;
		memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);

		if (pla) {
			if (pla->iocb.u.isp24.status_subcode == ELS_PRLI) {
				u16 wd3_lo;

				fcport->fw_login_state = DSC_LS_PRLI_PEND;
				fcport->local = 0;
				fcport->loop_id =
					le16_to_cpu(
					    pla->iocb.u.isp24.nport_handle);
				fcport->fw_login_state = DSC_LS_PRLI_PEND;
				wd3_lo =
				    le16_to_cpu(
					pla->iocb.u.isp24.u.prli.wd3_lo);

				if (wd3_lo & BIT_7)
					fcport->conf_compl_supported = 1;

				if ((wd3_lo & BIT_4) == 0)
					fcport->port_type = FCT_INITIATOR;
				else
					fcport->port_type = FCT_TARGET;
			}
			qlt_plogi_ack_unref(vha, pla);
		} else {
			fc_port_t *dfcp = NULL;

			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			tfcp = qla2x00_find_fcport_by_nportid(vha,
			    &e->u.new_sess.id, 1);
			if (tfcp && (tfcp != fcport)) {
				/*
				 * We have a conflict fcport with same NportID.
				 */
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %8phC found conflict b4 add. DS %d LS %d\n",
				    __func__, tfcp->port_name, tfcp->disc_state,
				    tfcp->fw_login_state);

				switch (tfcp->disc_state) {
				case DSC_DELETED:
					break;
				case DSC_DELETE_PEND:
					fcport->login_pause = 1;
					tfcp->conflict = fcport;
					break;
				default:
					fcport->login_pause = 1;
					tfcp->conflict = fcport;
					dfcp = tfcp;
					break;
				}
			}
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			if (dfcp)
				qlt_schedule_sess_for_deletion(tfcp);

			if (N2N_TOPO(vha->hw)) {
				fcport->flags &= ~FCF_FABRIC_DEVICE;
				fcport->keep_nport_handle = 1;
				if (vha->flags.nvme_enabled) {
					fcport->fc4_type =
					    (FS_FC4TYPE_NVME | FS_FC4TYPE_FCP);
					fcport->n2n_flag = 1;
				}
				fcport->fw_login_state = 0;

				schedule_delayed_work(&vha->scan.scan_work, 5);
			} else {
				qla24xx_fcport_handle_login(vha, fcport);
			}
		}
	}

	if (free_fcport) {
		qla2x00_free_fcport(fcport);
		if (pla) {
			list_del(&pla->list);
			kmem_cache_free(qla_tgt_plogi_cachep, pla);
		}
	}
}
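/*
 * qla_sp_retry
 *	Re-issue a previously prepared srb via qla2x00_start_sp(); on
 *	failure hand it to qla24xx_sp_unmap().
 */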
static void qla_sp_retry(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	struct srb *sp = e->u.iosb.sp;
	int rval;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x2043,
		    "%s: %s: Re-issue IOCB failed (%d).\n",
		    __func__, sp->name, rval);
		qla24xx_sp_unmap(vha, sp);
	}
}
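/*
 * qla2x00_do_work
 *	Drain the vha work list and dispatch each queued event to its
 *	handler. If a handler returns EAGAIN, the remaining events are put
 *	back at the head of the list for a later pass.
 */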
void
qla2x00_do_work(struct scsi_qla_host *vha)
{
	struct qla_work_evt *e, *tmp;
	unsigned long flags;
	LIST_HEAD(work);
	int rc;

	spin_lock_irqsave(&vha->work_lock, flags);
	list_splice_init(&vha->work_list, &work);
	spin_unlock_irqrestore(&vha->work_lock, flags);

	list_for_each_entry_safe(e, tmp, &work, list) {
		rc = QLA_SUCCESS;
		switch (e->type) {
		case QLA_EVT_AEN:
			fc_host_post_event(vha->host, fc_get_event_number(),
			    e->u.aen.code, e->u.aen.data);
			break;
		case QLA_EVT_IDC_ACK:
			qla81xx_idc_ack(vha, e->u.idc_ack.mb);
			break;
		case QLA_EVT_ASYNC_LOGIN:
			qla2x00_async_login(vha, e->u.logio.fcport,
			    e->u.logio.data);
			break;
		case QLA_EVT_ASYNC_LOGOUT:
			rc = qla2x00_async_logout(vha, e->u.logio.fcport);
			break;
		case QLA_EVT_ASYNC_ADISC:
			qla2x00_async_adisc(vha, e->u.logio.fcport,
			    e->u.logio.data);
			break;
		case QLA_EVT_UEVENT:
			qla2x00_uevent_emit(vha, e->u.uevent.code);
			break;
		case QLA_EVT_AENFX:
			qlafx00_process_aen(vha, e);
			break;
		case QLA_EVT_GPNID:
			qla24xx_async_gpnid(vha, &e->u.gpnid.id);
			break;
		case QLA_EVT_UNMAP:
			qla24xx_sp_unmap(vha, e->u.iosb.sp);
			break;
		case QLA_EVT_RELOGIN:
			qla2x00_relogin(vha);
			break;
		case QLA_EVT_NEW_SESS:
			qla24xx_create_new_sess(vha, e);
			break;
		case QLA_EVT_GPDB:
			qla24xx_async_gpdb(vha, e->u.fcport.fcport,
			    e->u.fcport.opt);
			break;
		case QLA_EVT_PRLI:
			qla24xx_async_prli(vha, e->u.fcport.fcport);
			break;
		case QLA_EVT_GPSC:
			qla24xx_async_gpsc(vha, e->u.fcport.fcport);
			break;
		case QLA_EVT_GNL:
			qla24xx_async_gnl(vha, e->u.fcport.fcport);
			break;
		case QLA_EVT_NACK:
			qla24xx_do_nack_work(vha, e);
			break;
		case QLA_EVT_ASYNC_PRLO:
			rc = qla2x00_async_prlo(vha, e->u.logio.fcport);
			break;
		case QLA_EVT_ASYNC_PRLO_DONE:
			qla2x00_async_prlo_done(vha, e->u.logio.fcport,
			    e->u.logio.data);
			break;
		case QLA_EVT_GPNFT:
			qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type,
			    e->u.gpnft.sp);
			break;
		case QLA_EVT_GPNFT_DONE:
			qla24xx_async_gpnft_done(vha, e->u.iosb.sp);
			break;
		case QLA_EVT_GNNFT_DONE:
			qla24xx_async_gnnft_done(vha, e->u.iosb.sp);
			break;
		case QLA_EVT_GNNID:
			qla24xx_async_gnnid(vha, e->u.fcport.fcport);
			break;
		case QLA_EVT_GFPNID:
			qla24xx_async_gfpnid(vha, e->u.fcport.fcport);
			break;
		case QLA_EVT_SP_RETRY:
			qla_sp_retry(vha, e);
			break;
		case QLA_EVT_IIDMA:
			qla_do_iidma_work(vha, e->u.fcport.fcport);
			break;
		case QLA_EVT_ELS_PLOGI:
			qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
			    e->u.fcport.fcport, false);
			break;
		case QLA_EVT_SA_REPLACE:
			qla24xx_issue_sa_replace_iocb(vha, e);
			break;
		}

		if (rc == EAGAIN) {
			/* put 'work' at head of 'vha->work_list' */
			spin_lock_irqsave(&vha->work_lock, flags);
			list_splice(&work, &vha->work_list);
			spin_unlock_irqrestore(&vha->work_lock, flags);
			break;
		}
		list_del_init(&e->list);
		if (e->flags & QLA_EVT_FLAG_FREE)
			kfree(e);

		/* For each work completed decrement vha ref count */
		QLA_VHA_MARK_NOT_BUSY(vha);
	}
}
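/*
 * qla24xx_post_relogin_work
 *	Queue a QLA_EVT_RELOGIN event; if no event can be allocated, fall
 *	back to setting RELOGIN_NEEDED so the relogin is retried later.
 */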
int qla24xx_post_relogin_work(struct scsi_qla_host *vha)
|
|
|
|
{
|
|
|
|
struct qla_work_evt *e;
|
|
|
|
|
|
|
|
e = qla2x00_alloc_work(vha, QLA_EVT_RELOGIN);
|
|
|
|
|
|
|
|
if (!e) {
|
|
|
|
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
|
|
|
|
return QLA_FUNCTION_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
return qla2x00_post_work(vha, e);
|
|
|
|
}
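/*
 * Most qla2x00_post_*_work() helpers in this file follow the same shape as
 * qla24xx_post_relogin_work() above: allocate a work event for the DPC
 * thread, fill in the relevant union member, and queue it with
 * qla2x00_post_work().  A minimal sketch for an fcport-carrying event
 * (the helper name is illustrative only, not part of the driver):
 */
#if 0
static int example_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_GPSC);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;	/* consumed by the DPC work loop */
	return qla2x00_post_work(vha, e);
}
#endif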
|
|
|
|
|
2008-11-06 18:40:51 +00:00
|
|
|
/* Relogin all the fcports of a vport
|
|
|
|
* Context: dpc thread
|
|
|
|
*/
|
|
|
|
void qla2x00_relogin(struct scsi_qla_host *vha)
|
|
|
|
{
|
|
|
|
fc_port_t *fcport;
|
2018-08-02 20:16:45 +00:00
|
|
|
int status, relogin_needed = 0;
|
2017-01-20 06:28:00 +00:00
|
|
|
struct event_arg ea;
|
2008-11-06 18:40:51 +00:00
|
|
|
|
|
|
|
list_for_each_entry(fcport, &vha->vp_fcports, list) {
|
2017-12-28 20:33:24 +00:00
|
|
|
/*
|
|
|
|
* If the port is not ONLINE then try to login
|
|
|
|
* to it if we haven't run out of retries.
|
|
|
|
*/
|
2010-05-04 22:01:26 +00:00
|
|
|
if (atomic_read(&fcport->state) != FCS_ONLINE &&
|
2018-08-02 20:16:45 +00:00
|
|
|
fcport->login_retry) {
|
|
|
|
if (fcport->scan_state != QLA_FCPORT_FOUND ||
|
2021-06-24 05:26:02 +00:00
|
|
|
fcport->disc_state == DSC_LOGIN_AUTH_PEND ||
|
2018-08-02 20:16:45 +00:00
|
|
|
fcport->disc_state == DSC_LOGIN_COMPLETE)
|
|
|
|
continue;
|
2008-11-06 18:40:51 +00:00
|
|
|
|
2018-08-02 20:16:45 +00:00
|
|
|
if (fcport->flags & (FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE) ||
|
|
|
|
fcport->disc_state == DSC_DELETE_PEND) {
|
|
|
|
relogin_needed = 1;
|
|
|
|
} else {
|
|
|
|
if (vha->hw->current_topology != ISP_CFG_NL) {
|
|
|
|
memset(&ea, 0, sizeof(ea));
|
|
|
|
ea.fcport = fcport;
|
2019-08-09 03:02:15 +00:00
|
|
|
qla24xx_handle_relogin_event(vha, &ea);
|
2018-08-02 20:16:45 +00:00
|
|
|
} else if (vha->hw->current_topology ==
|
|
|
|
ISP_CFG_NL) {
|
|
|
|
fcport->login_retry--;
|
|
|
|
status =
|
|
|
|
qla2x00_local_device_login(vha,
|
|
|
|
fcport);
|
|
|
|
if (status == QLA_SUCCESS) {
|
|
|
|
fcport->old_loop_id =
|
|
|
|
fcport->loop_id;
|
|
|
|
ql_dbg(ql_dbg_disc, vha, 0x2003,
|
|
|
|
"Port login OK: logged in ID 0x%x.\n",
|
|
|
|
fcport->loop_id);
|
|
|
|
qla2x00_update_fcport
|
|
|
|
(vha, fcport);
|
|
|
|
} else if (status == 1) {
|
|
|
|
set_bit(RELOGIN_NEEDED,
|
|
|
|
&vha->dpc_flags);
|
|
|
|
/* retry the login again */
|
|
|
|
ql_dbg(ql_dbg_disc, vha, 0x2007,
|
|
|
|
"Retrying %d login again loop_id 0x%x.\n",
|
|
|
|
fcport->login_retry,
|
|
|
|
fcport->loop_id);
|
|
|
|
} else {
|
|
|
|
fcport->login_retry = 0;
|
|
|
|
}
|
2008-11-06 18:40:51 +00:00
|
|
|
|
2018-08-02 20:16:45 +00:00
|
|
|
if (fcport->login_retry == 0 &&
|
|
|
|
status != QLA_SUCCESS)
|
|
|
|
qla2x00_clear_loop_id(fcport);
|
|
|
|
}
|
2008-11-06 18:40:51 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
|
|
|
|
break;
|
2008-04-03 20:13:18 +00:00
|
|
|
}
|
2017-12-28 20:33:16 +00:00
|
|
|
|
2018-08-02 20:16:45 +00:00
|
|
|
if (relogin_needed)
|
|
|
|
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
|
|
|
|
|
2017-12-28 20:33:16 +00:00
|
|
|
ql_dbg(ql_dbg_disc, vha, 0x400e,
|
|
|
|
"Relogin end.\n");
|
2008-04-03 20:13:18 +00:00
|
|
|
}
|
|
|
|
|
2012-08-22 18:21:03 +00:00
|
|
|
/* Schedule work on any of the dpc-workqueues */
|
|
|
|
void
|
|
|
|
qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code)
|
|
|
|
{
|
|
|
|
struct qla_hw_data *ha = base_vha->hw;
|
|
|
|
|
|
|
|
switch (work_code) {
|
|
|
|
case MBA_IDC_AEN: /* 0x8200 */
|
|
|
|
if (ha->dpc_lp_wq)
|
|
|
|
queue_work(ha->dpc_lp_wq, &ha->idc_aen);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case QLA83XX_NIC_CORE_RESET: /* 0x1 */
|
|
|
|
if (!ha->flags.nic_core_reset_hdlr_active) {
|
|
|
|
if (ha->dpc_hp_wq)
|
|
|
|
queue_work(ha->dpc_hp_wq, &ha->nic_core_reset);
|
|
|
|
} else
|
|
|
|
ql_dbg(ql_dbg_p3p, base_vha, 0xb05e,
|
|
|
|
"NIC Core reset is already active. Skip "
|
|
|
|
"scheduling it again.\n");
|
|
|
|
break;
|
|
|
|
case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */
|
|
|
|
if (ha->dpc_hp_wq)
|
|
|
|
queue_work(ha->dpc_hp_wq, &ha->idc_state_handler);
|
|
|
|
break;
|
|
|
|
case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */
|
|
|
|
if (ha->dpc_hp_wq)
|
|
|
|
queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
ql_log(ql_log_warn, base_vha, 0xb05f,
|
2015-02-27 14:52:31 +00:00
|
|
|
"Unknown work-code=0x%x.\n", work_code);
|
2012-08-22 18:21:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
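/*
 * Usage sketch (the calls below mirror the handlers further down in this
 * file): the MBA_IDC_AEN code routes to ha->dpc_lp_wq, while the QLA83XX_*
 * work codes route to ha->dpc_hp_wq:
 */
#if 0
	qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
	qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
#endif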
|
|
|
|
|
|
|
|
/* Work: Perform NIC Core Unrecoverable state handling */
|
|
|
|
void
|
|
|
|
qla83xx_nic_core_unrecoverable_work(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct qla_hw_data *ha =
|
2012-08-22 18:21:35 +00:00
|
|
|
container_of(work, struct qla_hw_data, nic_core_unrecoverable);
|
2012-08-22 18:21:03 +00:00
|
|
|
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
|
|
|
|
uint32_t dev_state = 0;
|
|
|
|
|
|
|
|
qla83xx_idc_lock(base_vha, 0);
|
|
|
|
qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
|
|
|
|
qla83xx_reset_ownership(base_vha);
|
|
|
|
if (ha->flags.nic_core_reset_owner) {
|
|
|
|
ha->flags.nic_core_reset_owner = 0;
|
|
|
|
qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
|
|
|
|
QLA8XXX_DEV_FAILED);
|
|
|
|
ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n");
|
|
|
|
qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
|
|
|
|
}
|
|
|
|
qla83xx_idc_unlock(base_vha, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Work: Execute IDC state handler */
|
|
|
|
void
|
|
|
|
qla83xx_idc_state_handler_work(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct qla_hw_data *ha =
|
2012-08-22 18:21:35 +00:00
|
|
|
container_of(work, struct qla_hw_data, idc_state_handler);
|
2012-08-22 18:21:03 +00:00
|
|
|
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
|
|
|
|
uint32_t dev_state = 0;
|
|
|
|
|
|
|
|
qla83xx_idc_lock(base_vha, 0);
|
|
|
|
qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
|
|
|
|
if (dev_state == QLA8XXX_DEV_FAILED ||
|
|
|
|
dev_state == QLA8XXX_DEV_NEED_QUIESCENT)
|
|
|
|
qla83xx_idc_state_handler(base_vha);
|
|
|
|
qla83xx_idc_unlock(base_vha, 0);
|
|
|
|
}
|
|
|
|
|
2012-11-21 07:40:29 +00:00
|
|
|
static int
|
2012-08-22 18:21:03 +00:00
|
|
|
qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha)
|
|
|
|
{
|
|
|
|
int rval = QLA_SUCCESS;
|
|
|
|
unsigned long heart_beat_wait = jiffies + (1 * HZ);
|
|
|
|
uint32_t heart_beat_counter1, heart_beat_counter2;
|
|
|
|
|
|
|
|
do {
|
|
|
|
if (time_after(jiffies, heart_beat_wait)) {
|
|
|
|
ql_dbg(ql_dbg_p3p, base_vha, 0xb07c,
|
|
|
|
"Nic Core f/w is not alive.\n");
|
|
|
|
rval = QLA_FUNCTION_FAILED;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
qla83xx_idc_lock(base_vha, 0);
|
|
|
|
qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
|
|
|
|
&heart_beat_counter1);
|
|
|
|
qla83xx_idc_unlock(base_vha, 0);
|
|
|
|
msleep(100);
|
|
|
|
qla83xx_idc_lock(base_vha, 0);
|
|
|
|
qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
|
|
|
|
&heart_beat_counter2);
|
|
|
|
qla83xx_idc_unlock(base_vha, 0);
|
|
|
|
} while (heart_beat_counter1 == heart_beat_counter2);
|
|
|
|
|
|
|
|
return rval;
|
|
|
|
}
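/*
 * Note: the check above samples the firmware heartbeat register twice,
 * 100 ms apart, under the IDC lock; if the counter has not advanced within
 * roughly one second (heart_beat_wait), the NIC core firmware is declared
 * dead and QLA_FUNCTION_FAILED is returned.
 */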
|
|
|
|
|
|
|
|
/* Work: Perform NIC Core Reset handling */
|
|
|
|
void
|
|
|
|
qla83xx_nic_core_reset_work(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct qla_hw_data *ha =
|
|
|
|
container_of(work, struct qla_hw_data, nic_core_reset);
|
|
|
|
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
|
|
|
|
uint32_t dev_state = 0;
|
|
|
|
|
2012-08-22 18:21:04 +00:00
|
|
|
if (IS_QLA2031(ha)) {
|
|
|
|
if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS)
|
|
|
|
ql_log(ql_log_warn, base_vha, 0xb081,
|
|
|
|
"Failed to dump mctp\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2012-08-22 18:21:03 +00:00
|
|
|
if (!ha->flags.nic_core_reset_hdlr_active) {
|
|
|
|
if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) {
|
|
|
|
qla83xx_idc_lock(base_vha, 0);
|
|
|
|
qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE,
|
|
|
|
&dev_state);
|
|
|
|
qla83xx_idc_unlock(base_vha, 0);
|
|
|
|
if (dev_state != QLA8XXX_DEV_NEED_RESET) {
|
|
|
|
ql_dbg(ql_dbg_p3p, base_vha, 0xb07a,
|
|
|
|
"Nic Core f/w is alive.\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ha->flags.nic_core_reset_hdlr_active = 1;
|
|
|
|
if (qla83xx_nic_core_reset(base_vha)) {
|
|
|
|
/* NIC Core reset failed. */
|
|
|
|
ql_dbg(ql_dbg_p3p, base_vha, 0xb061,
|
|
|
|
"NIC Core reset failed.\n");
|
|
|
|
}
|
|
|
|
ha->flags.nic_core_reset_hdlr_active = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Work: Handle 8200 IDC aens */
|
|
|
|
void
|
|
|
|
qla83xx_service_idc_aen(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct qla_hw_data *ha =
|
|
|
|
container_of(work, struct qla_hw_data, idc_aen);
|
|
|
|
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
|
|
|
|
uint32_t dev_state, idc_control;
|
|
|
|
|
|
|
|
qla83xx_idc_lock(base_vha, 0);
|
|
|
|
qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
|
|
|
|
qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control);
|
|
|
|
qla83xx_idc_unlock(base_vha, 0);
|
|
|
|
if (dev_state == QLA8XXX_DEV_NEED_RESET) {
|
|
|
|
if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) {
|
|
|
|
ql_dbg(ql_dbg_p3p, base_vha, 0xb062,
|
|
|
|
"Application requested NIC Core Reset.\n");
|
|
|
|
qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
|
|
|
|
} else if (qla83xx_check_nic_core_fw_alive(base_vha) ==
|
|
|
|
QLA_SUCCESS) {
|
|
|
|
ql_dbg(ql_dbg_p3p, base_vha, 0xb07b,
|
|
|
|
"Other protocol driver requested NIC Core Reset.\n");
|
|
|
|
qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
|
|
|
|
}
|
|
|
|
} else if (dev_state == QLA8XXX_DEV_FAILED ||
|
|
|
|
dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
|
|
|
|
qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
scsi: qla2xxx: Remove in_interrupt() from qla83xx-specific code
qla83xx_wait_logic() is used to control the frequency of device IDC lock
retries. If in_interrupt() is true, it does 20 loops of cpu_relax().
Otherwise, it sleeps for 100ms and yields the CPU.
While in_interrupt() is ill-defined and does not provide what the name
suggests, it is not needed here: that qla83xx_wait_logic() is exclusively
called by qla83xx_idc_lock() / unlock(), and they always run from process
context. Below is an analysis of all the idc lock/unlock callers, in order
of appearance:
- qla_os.c:
qla83xx_nic_core_unrecoverable_work(),
qla83xx_idc_state_handler_work(),
qla83xx_nic_core_reset_work(),
qla83xx_service_idc_aen(), all workqueue context
- qla_os.c: qla83xx_check_nic_core_fw_alive(), has msleep()
- qla_os.c: qla83xx_set_drv_presence(), called once from
qla2x00_abort_isp(), which is bound to process-context ->abort_isp()
hook. It also invokes wait_for_completion_timeout() through the chain
qla2x00_configure_hba() => qla24xx_link_initialize() =>
qla2x00_mailbox_command().
- qla_os.c: qla83xx_clear_drv_presence(), which is called from
qla2x00_abort_isp() discussed above, and from qla2x00_remove_one()
which is PCI process-context ->remove() hook.
- qla_os.c: qla83xx_need_reset_handler(), has a one second msleep() in
a loop.
- qla_os.c: qla83xx_device_bootstrap(), called only by
qla83xx_idc_state_handler(), which has multiple msleep()
invocations.
- qla_os.c: qla83xx_idc_state_handler(), multiple msleep()
invocations.
- qla_attr.c: qla2x00_sysfs_write_reset(), sysfs bin_attribute
->write() hook, process context
- qla_init.c: qla83xx_nic_core_fw_load()
=> qla_init.c: qla2x00_initialize_adapter()
=> bound to isp_operations ->initialize_adapter() hook
** => qla_os.c: qla2x00_probe_one(), PCI ->probe() process ctx
- qla_init.c: qla83xx_initiating_reset(), msleep() in a loop.
- qla_init.c: qla83xx_nic_core_reset(), called by
qla83xx_nic_core_reset_work(), workqueue context.
Remove the in_interrupt() check, and thus replace the entirety of
qla83xx_wait_logic() with an msleep(QLA83XX_WAIT_LOGIC_MS).
Mark qla83xx_idc_lock() / unlock() with "Context: task, can sleep".
Link: https://lore.kernel.org/r/20201126132952.2287996-7-bigeasy@linutronix.de
Cc: Nilesh Javali <njavali@marvell.com>
Cc: GR-QLogic-Storage-Upstream@marvell.com
Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>
Reviewed-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2020-11-26 13:29:44 +00:00
|
|
|
/*
|
|
|
|
* Control the frequency of IDC lock retries
|
|
|
|
*/
|
|
|
|
#define QLA83XX_WAIT_LOGIC_MS 100
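/*
 * As described in the commit message above, the old qla83xx_wait_logic()
 * helper (20 cpu_relax() loops when in_interrupt(), otherwise a 100 ms
 * sleep) is gone; since every caller runs in task context, each retry
 * point now simply sleeps:
 */
#if 0
	msleep(QLA83XX_WAIT_LOGIC_MS);
#endif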
|
2012-08-22 18:21:03 +00:00
|
|
|
|
2012-11-21 07:40:29 +00:00
|
|
|
static int
|
2012-08-22 18:21:03 +00:00
|
|
|
qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
|
|
|
|
{
|
|
|
|
int rval;
|
|
|
|
uint32_t data;
|
|
|
|
uint32_t idc_lck_rcvry_stage_mask = 0x3;
|
|
|
|
uint32_t idc_lck_rcvry_owner_mask = 0x3c;
|
|
|
|
struct qla_hw_data *ha = base_vha->hw;
|
2019-04-11 21:53:17 +00:00
|
|
|
|
2013-02-08 06:57:53 +00:00
|
|
|
ql_dbg(ql_dbg_p3p, base_vha, 0xb086,
|
|
|
|
"Trying force recovery of the IDC lock.\n");
|
2012-08-22 18:21:03 +00:00
|
|
|
|
|
|
|
rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data);
|
|
|
|
if (rval)
|
|
|
|
return rval;
|
|
|
|
|
|
|
|
if ((data & idc_lck_rcvry_stage_mask) > 0) {
|
|
|
|
return QLA_SUCCESS;
|
|
|
|
} else {
|
|
|
|
data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2);
|
|
|
|
rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
|
|
|
|
data);
|
|
|
|
if (rval)
|
|
|
|
return rval;
|
|
|
|
|
|
|
|
msleep(200);
|
|
|
|
|
|
|
|
rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
|
|
|
|
&data);
|
|
|
|
if (rval)
|
|
|
|
return rval;
|
|
|
|
|
|
|
|
if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) {
|
|
|
|
data &= (IDC_LOCK_RECOVERY_STAGE2 |
|
|
|
|
~(idc_lck_rcvry_stage_mask));
|
|
|
|
rval = qla83xx_wr_reg(base_vha,
|
|
|
|
QLA83XX_IDC_LOCK_RECOVERY, data);
|
|
|
|
if (rval)
|
|
|
|
return rval;
|
|
|
|
|
|
|
|
/* Forcefully perform IDC UnLock */
|
|
|
|
rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK,
|
|
|
|
&data);
|
|
|
|
if (rval)
|
|
|
|
return rval;
|
|
|
|
/* Clear lock-id by setting 0xff */
|
|
|
|
rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
|
|
|
|
0xff);
|
|
|
|
if (rval)
|
|
|
|
return rval;
|
|
|
|
/* Clear lock-recovery by setting 0x0 */
|
|
|
|
rval = qla83xx_wr_reg(base_vha,
|
|
|
|
QLA83XX_IDC_LOCK_RECOVERY, 0x0);
|
|
|
|
if (rval)
|
|
|
|
return rval;
|
|
|
|
} else
|
|
|
|
return QLA_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
return rval;
|
|
|
|
}
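/*
 * Note (inferred from the masks above, so treat it as an assumption): the
 * lock-recovery register appears to keep the recovery stage in bits 1:0
 * (mask 0x3) and the recovering function number in bits 5:2 (mask 0x3c),
 * hence the ">> 2" in the owner comparison.
 */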
|
|
|
|
|
2012-11-21 07:40:29 +00:00
|
|
|
static int
|
2012-08-22 18:21:03 +00:00
|
|
|
qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha)
|
|
|
|
{
|
|
|
|
int rval = QLA_SUCCESS;
|
|
|
|
uint32_t o_drv_lockid, n_drv_lockid;
|
|
|
|
unsigned long lock_recovery_timeout;
|
|
|
|
|
|
|
|
lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT;
|
|
|
|
retry_lockid:
|
|
|
|
rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid);
|
|
|
|
if (rval)
|
|
|
|
goto exit;
|
|
|
|
|
|
|
|
/* MAX wait time before forcing IDC Lock recovery = 2 secs */
|
|
|
|
if (time_after_eq(jiffies, lock_recovery_timeout)) {
|
|
|
|
if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS)
|
|
|
|
return QLA_SUCCESS;
|
|
|
|
else
|
|
|
|
return QLA_FUNCTION_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid);
|
|
|
|
if (rval)
|
|
|
|
goto exit;
|
|
|
|
|
|
|
|
if (o_drv_lockid == n_drv_lockid) {
|
2020-11-26 13:29:44 +00:00
|
|
|
msleep(QLA83XX_WAIT_LOGIC_MS);
|
2012-08-22 18:21:03 +00:00
|
|
|
goto retry_lockid;
|
|
|
|
} else
|
|
|
|
return QLA_SUCCESS;
|
|
|
|
|
|
|
|
exit:
|
|
|
|
return rval;
|
|
|
|
}
|
|
|
|
|
2020-11-26 13:29:44 +00:00
|
|
|
/*
|
|
|
|
* Context: task, can sleep
|
|
|
|
*/
|
2012-08-22 18:21:03 +00:00
|
|
|
void
|
|
|
|
qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
|
|
|
|
{
|
|
|
|
uint32_t data;
|
2013-02-08 06:57:53 +00:00
|
|
|
uint32_t lock_owner;
|
2012-08-22 18:21:03 +00:00
|
|
|
struct qla_hw_data *ha = base_vha->hw;
|
|
|
|
|
2020-11-26 13:29:44 +00:00
|
|
|
might_sleep();
|
|
|
|
|
2012-08-22 18:21:03 +00:00
|
|
|
/* IDC-lock implementation using driver-lock/lock-id remote registers */
|
|
|
|
retry_lock:
|
|
|
|
if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data)
|
|
|
|
== QLA_SUCCESS) {
|
|
|
|
if (data) {
|
|
|
|
/* Setting lock-id to our function-number */
|
|
|
|
qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
|
|
|
|
ha->portnum);
|
|
|
|
} else {
|
2013-02-08 06:57:53 +00:00
|
|
|
qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID,
|
|
|
|
&lock_owner);
|
2012-08-22 18:21:03 +00:00
|
|
|
ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
|
2013-02-08 06:57:53 +00:00
|
|
|
"Failed to acquire IDC lock, acquired by %d, "
|
|
|
|
"retrying...\n", lock_owner);
|
2012-08-22 18:21:03 +00:00
|
|
|
|
|
|
|
/* Retry/Perform IDC-Lock recovery */
|
|
|
|
if (qla83xx_idc_lock_recovery(base_vha)
|
|
|
|
== QLA_SUCCESS) {
|
2020-11-26 13:29:44 +00:00
|
|
|
msleep(QLA83XX_WAIT_LOGIC_MS);
|
2012-08-22 18:21:03 +00:00
|
|
|
goto retry_lock;
|
|
|
|
} else
|
|
|
|
ql_log(ql_log_warn, base_vha, 0xb075,
|
|
|
|
"IDC Lock recovery FAILED.\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
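/*
 * Usage sketch (the pattern is taken from the IDC work handlers above):
 * device IDC register accesses are bracketed by this lock/unlock pair,
 * for example:
 */
#if 0
	qla83xx_idc_lock(base_vha, 0);
	qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
	qla83xx_idc_unlock(base_vha, 0);
#endif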
|
|
|
|
|
2020-02-12 21:44:25 +00:00
|
|
|
static bool
|
|
|
|
qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha,
|
|
|
|
struct purex_entry_24xx *purex)
|
|
|
|
{
|
|
|
|
char fwstr[16];
|
|
|
|
u32 sid = purex->s_id[2] << 16 | purex->s_id[1] << 8 | purex->s_id[0];
|
2020-02-12 21:44:26 +00:00
|
|
|
struct port_database_24xx *pdb;
|
2020-02-12 21:44:25 +00:00
|
|
|
|
|
|
|
/* Domain Controller is always logged-out. */
|
|
|
|
/* if RDP request is not from Domain Controller: */
|
|
|
|
if (sid != 0xfffc01)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
ql_dbg(ql_dbg_init, vha, 0x0181, "%s: s_id=%#x\n", __func__, sid);
|
|
|
|
|
2020-02-12 21:44:26 +00:00
|
|
|
pdb = kzalloc(sizeof(*pdb), GFP_KERNEL);
|
|
|
|
if (!pdb) {
|
|
|
|
ql_dbg(ql_dbg_init, vha, 0x0181,
|
|
|
|
"%s: Failed allocate pdb\n", __func__);
|
2020-05-18 21:17:12 +00:00
|
|
|
} else if (qla24xx_get_port_database(vha,
|
|
|
|
le16_to_cpu(purex->nport_handle), pdb)) {
|
2020-02-12 21:44:26 +00:00
|
|
|
ql_dbg(ql_dbg_init, vha, 0x0181,
|
|
|
|
"%s: Failed get pdb sid=%x\n", __func__, sid);
|
|
|
|
} else if (pdb->current_login_state != PDS_PLOGI_COMPLETE &&
|
|
|
|
pdb->current_login_state != PDS_PRLI_COMPLETE) {
|
|
|
|
ql_dbg(ql_dbg_init, vha, 0x0181,
|
|
|
|
"%s: Port not logged in sid=%#x\n", __func__, sid);
|
|
|
|
} else {
|
|
|
|
/* RDP request is from logged in port */
|
|
|
|
kfree(pdb);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
kfree(pdb);
|
|
|
|
|
2020-02-12 21:44:25 +00:00
|
|
|
vha->hw->isp_ops->fw_version_str(vha, fwstr, sizeof(fwstr));
|
|
|
|
fwstr[strcspn(fwstr, " ")] = 0;
|
|
|
|
/* if FW version allows RDP response length up to 2048 bytes: */
|
|
|
|
if (strcmp(fwstr, "8.09.00") > 0 || strcmp(fwstr, "8.05.65") == 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
ql_dbg(ql_dbg_init, vha, 0x0181, "%s: fw=%s\n", __func__, fwstr);
|
|
|
|
|
|
|
|
/* RDP response length is to be reduced to maximum 256 bytes */
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-02-12 21:44:18 +00:00
|
|
|
/*
|
|
|
|
* Function Name: qla24xx_process_purex_iocb
|
|
|
|
*
|
|
|
|
* Description:
|
|
|
|
* Prepare a RDP response and send to Fabric switch
|
|
|
|
*
|
|
|
|
* PARAMETERS:
|
|
|
|
* vha: SCSI qla host
|
|
|
|
* purex: RDP request received by HBA
|
|
|
|
*/
|
2020-06-30 10:22:28 +00:00
|
|
|
void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
|
|
|
|
struct purex_item *item)
|
2020-02-12 21:44:18 +00:00
|
|
|
{
|
|
|
|
struct qla_hw_data *ha = vha->hw;
|
2020-06-30 10:22:28 +00:00
|
|
|
struct purex_entry_24xx *purex =
|
|
|
|
(struct purex_entry_24xx *)&item->iocb;
|
2020-02-12 21:44:18 +00:00
|
|
|
dma_addr_t rsp_els_dma;
|
|
|
|
dma_addr_t rsp_payload_dma;
|
|
|
|
dma_addr_t stat_dma;
|
|
|
|
dma_addr_t sfp_dma;
|
|
|
|
struct els_entry_24xx *rsp_els = NULL;
|
|
|
|
struct rdp_rsp_payload *rsp_payload = NULL;
|
|
|
|
struct link_statistics *stat = NULL;
|
|
|
|
uint8_t *sfp = NULL;
|
|
|
|
uint16_t sfp_flags = 0;
|
2020-02-12 21:44:25 +00:00
|
|
|
uint rsp_payload_length = sizeof(*rsp_payload);
|
2020-02-12 21:44:24 +00:00
|
|
|
int rval;
|
2020-02-12 21:44:18 +00:00
|
|
|
|
|
|
|
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0180,
|
|
|
|
"%s: Enter\n", __func__);
|
|
|
|
|
|
|
|
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0181,
|
|
|
|
"-------- ELS REQ -------\n");
|
|
|
|
ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0182,
|
2020-05-18 21:17:09 +00:00
|
|
|
purex, sizeof(*purex));
|
2020-02-12 21:44:18 +00:00
|
|
|
|
2020-02-12 21:44:25 +00:00
|
|
|
if (qla25xx_rdp_rsp_reduce_size(vha, purex)) {
|
|
|
|
rsp_payload_length =
|
|
|
|
offsetof(typeof(*rsp_payload), optical_elmt_desc);
|
|
|
|
ql_dbg(ql_dbg_init, vha, 0x0181,
|
|
|
|
"Reducing RSP payload length to %u bytes...\n",
|
|
|
|
rsp_payload_length);
|
|
|
|
}
|
|
|
|
|
2020-02-12 21:44:18 +00:00
|
|
|
rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els),
|
|
|
|
&rsp_els_dma, GFP_KERNEL);
|
2020-02-12 21:44:23 +00:00
|
|
|
if (!rsp_els) {
|
|
|
|
ql_log(ql_log_warn, vha, 0x0183,
|
|
|
|
"Failed allocate dma buffer ELS RSP.\n");
|
2020-02-12 21:44:18 +00:00
|
|
|
goto dealloc;
|
2020-02-12 21:44:23 +00:00
|
|
|
}
|
2020-02-12 21:44:18 +00:00
|
|
|
|
|
|
|
rsp_payload = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
|
|
|
|
&rsp_payload_dma, GFP_KERNEL);
|
2020-02-12 21:44:23 +00:00
|
|
|
if (!rsp_payload) {
|
|
|
|
ql_log(ql_log_warn, vha, 0x0184,
|
|
|
|
"Failed allocate dma buffer ELS RSP payload.\n");
|
2020-02-12 21:44:18 +00:00
|
|
|
goto dealloc;
|
2020-02-12 21:44:23 +00:00
|
|
|
}
|
2020-02-12 21:44:18 +00:00
|
|
|
|
|
|
|
sfp = dma_alloc_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
|
|
|
|
&sfp_dma, GFP_KERNEL);
|
|
|
|
|
|
|
|
stat = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stat),
|
|
|
|
&stat_dma, GFP_KERNEL);
|
|
|
|
|
|
|
|
/* Prepare Response IOCB */
|
|
|
|
rsp_els->entry_type = ELS_IOCB_TYPE;
|
|
|
|
rsp_els->entry_count = 1;
|
|
|
|
rsp_els->sys_define = 0;
|
|
|
|
rsp_els->entry_status = 0;
|
|
|
|
rsp_els->handle = 0;
|
|
|
|
rsp_els->nport_handle = purex->nport_handle;
|
2020-05-18 21:17:12 +00:00
|
|
|
rsp_els->tx_dsd_count = cpu_to_le16(1);
|
2020-02-12 21:44:18 +00:00
|
|
|
rsp_els->vp_index = purex->vp_idx;
|
|
|
|
rsp_els->sof_type = EST_SOFI3;
|
|
|
|
rsp_els->rx_xchg_address = purex->rx_xchg_addr;
|
|
|
|
rsp_els->rx_dsd_count = 0;
|
|
|
|
rsp_els->opcode = purex->els_frame_payload[0];
|
|
|
|
|
2020-02-12 21:44:23 +00:00
|
|
|
rsp_els->d_id[0] = purex->s_id[0];
|
|
|
|
rsp_els->d_id[1] = purex->s_id[1];
|
|
|
|
rsp_els->d_id[2] = purex->s_id[2];
|
2020-02-12 21:44:18 +00:00
|
|
|
|
2020-05-18 21:17:12 +00:00
|
|
|
rsp_els->control_flags = cpu_to_le16(EPD_ELS_ACC);
|
2020-02-12 21:44:18 +00:00
|
|
|
rsp_els->rx_byte_count = 0;
|
2020-02-12 21:44:25 +00:00
|
|
|
rsp_els->tx_byte_count = cpu_to_le32(rsp_payload_length);
|
2020-02-12 21:44:18 +00:00
|
|
|
|
|
|
|
put_unaligned_le64(rsp_payload_dma, &rsp_els->tx_address);
|
|
|
|
rsp_els->tx_len = rsp_els->tx_byte_count;
|
|
|
|
|
|
|
|
rsp_els->rx_address = 0;
|
|
|
|
rsp_els->rx_len = 0;
|
|
|
|
|
|
|
|
/* Prepare Response Payload */
|
|
|
|
rsp_payload->hdr.cmd = cpu_to_be32(0x2 << 24); /* LS_ACC */
|
2020-05-18 21:17:12 +00:00
|
|
|
rsp_payload->hdr.len = cpu_to_be32(le32_to_cpu(rsp_els->tx_byte_count) -
|
|
|
|
sizeof(rsp_payload->hdr));
|
2020-02-12 21:44:18 +00:00
|
|
|
|
|
|
|
/* Link service Request Info Descriptor */
|
|
|
|
rsp_payload->ls_req_info_desc.desc_tag = cpu_to_be32(0x1);
|
|
|
|
rsp_payload->ls_req_info_desc.desc_len =
|
|
|
|
cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc));
|
|
|
|
rsp_payload->ls_req_info_desc.req_payload_word_0 =
|
|
|
|
cpu_to_be32p((uint32_t *)purex->els_frame_payload);
|
|
|
|
|
|
|
|
/* Link service Request Info Descriptor 2 */
|
|
|
|
rsp_payload->ls_req_info_desc2.desc_tag = cpu_to_be32(0x1);
|
|
|
|
rsp_payload->ls_req_info_desc2.desc_len =
|
|
|
|
cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc2));
|
|
|
|
rsp_payload->ls_req_info_desc2.req_payload_word_0 =
|
|
|
|
cpu_to_be32p((uint32_t *)purex->els_frame_payload);
|
|
|
|
|
2020-02-26 22:40:16 +00:00
|
|
|
|
|
|
|
rsp_payload->sfp_diag_desc.desc_tag = cpu_to_be32(0x10000);
|
|
|
|
rsp_payload->sfp_diag_desc.desc_len =
|
|
|
|
cpu_to_be32(RDP_DESC_LEN(rsp_payload->sfp_diag_desc));
|
|
|
|
|
2020-02-12 21:44:18 +00:00
|
|
|
if (sfp) {
|
|
|
|
/* SFP Flags */
|
|
|
|
memset(sfp, 0, SFP_RTDI_LEN);
|
|
|
|
rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x7, 2, 0);
|
|
|
|
if (!rval) {
|
|
|
|
/* SFP Flags bits 3-0: Port Tx Laser Type */
|
|
|
|
if (sfp[0] & BIT_2 || sfp[1] & (BIT_6|BIT_5))
|
|
|
|
sfp_flags |= BIT_0; /* short wave */
|
|
|
|
else if (sfp[0] & BIT_1)
|
|
|
|
sfp_flags |= BIT_1; /* long wave 1310nm */
|
|
|
|
else if (sfp[1] & BIT_4)
|
|
|
|
sfp_flags |= BIT_1|BIT_0; /* long wave 1550nm */
|
|
|
|
}
|
|
|
|
|
|
|
|
/* SFP Type */
|
|
|
|
memset(sfp, 0, SFP_RTDI_LEN);
|
|
|
|
rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x0, 1, 0);
|
|
|
|
if (!rval) {
|
|
|
|
sfp_flags |= BIT_4; /* optical */
|
|
|
|
if (sfp[0] == 0x3)
|
|
|
|
sfp_flags |= BIT_6; /* sfp+ */
|
|
|
|
}
|
|
|
|
|
2020-02-26 22:40:16 +00:00
|
|
|
rsp_payload->sfp_diag_desc.sfp_flags = cpu_to_be16(sfp_flags);
|
|
|
|
|
2020-02-12 21:44:18 +00:00
|
|
|
/* SFP Diagnostics */
|
|
|
|
memset(sfp, 0, SFP_RTDI_LEN);
|
|
|
|
rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0x60, 10, 0);
|
2020-02-26 22:40:16 +00:00
|
|
|
if (!rval) {
|
2020-05-18 21:17:12 +00:00
|
|
|
__be16 *trx = (__force __be16 *)sfp; /* already be16 */
|
2020-02-12 21:44:18 +00:00
|
|
|
rsp_payload->sfp_diag_desc.temperature = trx[0];
|
|
|
|
rsp_payload->sfp_diag_desc.vcc = trx[1];
|
|
|
|
rsp_payload->sfp_diag_desc.tx_bias = trx[2];
|
|
|
|
rsp_payload->sfp_diag_desc.tx_power = trx[3];
|
|
|
|
rsp_payload->sfp_diag_desc.rx_power = trx[4];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Port Speed Descriptor */
|
|
|
|
rsp_payload->port_speed_desc.desc_tag = cpu_to_be32(0x10001);
|
|
|
|
rsp_payload->port_speed_desc.desc_len =
|
|
|
|
cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_speed_desc));
|
|
|
|
rsp_payload->port_speed_desc.speed_capab = cpu_to_be16(
|
2020-09-04 04:51:20 +00:00
|
|
|
qla25xx_fdmi_port_speed_capability(ha));
|
2020-02-12 21:44:18 +00:00
|
|
|
rsp_payload->port_speed_desc.operating_speed = cpu_to_be16(
|
2020-09-04 04:51:20 +00:00
|
|
|
qla25xx_fdmi_port_speed_currently(ha));
|
2020-02-12 21:44:18 +00:00
|
|
|
|
2020-02-26 22:40:16 +00:00
|
|
|
/* Link Error Status Descriptor */
|
|
|
|
rsp_payload->ls_err_desc.desc_tag = cpu_to_be32(0x10002);
|
|
|
|
rsp_payload->ls_err_desc.desc_len =
|
|
|
|
cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_err_desc));
|
|
|
|
|
2020-02-12 21:44:18 +00:00
|
|
|
if (stat) {
|
|
|
|
rval = qla24xx_get_isp_stats(vha, stat, stat_dma, 0);
|
|
|
|
if (!rval) {
|
|
|
|
rsp_payload->ls_err_desc.link_fail_cnt =
|
2020-05-18 21:17:12 +00:00
|
|
|
cpu_to_be32(le32_to_cpu(stat->link_fail_cnt));
|
2020-02-12 21:44:18 +00:00
|
|
|
rsp_payload->ls_err_desc.loss_sync_cnt =
|
2020-05-18 21:17:12 +00:00
|
|
|
cpu_to_be32(le32_to_cpu(stat->loss_sync_cnt));
|
2020-02-12 21:44:18 +00:00
|
|
|
rsp_payload->ls_err_desc.loss_sig_cnt =
|
2020-05-18 21:17:12 +00:00
|
|
|
cpu_to_be32(le32_to_cpu(stat->loss_sig_cnt));
|
2020-02-12 21:44:18 +00:00
|
|
|
rsp_payload->ls_err_desc.prim_seq_err_cnt =
|
2020-05-18 21:17:12 +00:00
|
|
|
cpu_to_be32(le32_to_cpu(stat->prim_seq_err_cnt));
|
2020-02-12 21:44:18 +00:00
|
|
|
rsp_payload->ls_err_desc.inval_xmit_word_cnt =
|
2020-05-18 21:17:12 +00:00
|
|
|
cpu_to_be32(le32_to_cpu(stat->inval_xmit_word_cnt));
|
2020-02-12 21:44:18 +00:00
|
|
|
rsp_payload->ls_err_desc.inval_crc_cnt =
|
2020-05-18 21:17:12 +00:00
|
|
|
cpu_to_be32(le32_to_cpu(stat->inval_crc_cnt));
|
2020-02-12 21:44:18 +00:00
|
|
|
rsp_payload->ls_err_desc.pn_port_phy_type |= BIT_6;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Portname Descriptor */
|
|
|
|
rsp_payload->port_name_diag_desc.desc_tag = cpu_to_be32(0x10003);
|
|
|
|
rsp_payload->port_name_diag_desc.desc_len =
|
|
|
|
cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_diag_desc));
|
|
|
|
memcpy(rsp_payload->port_name_diag_desc.WWNN,
|
|
|
|
vha->node_name,
|
|
|
|
sizeof(rsp_payload->port_name_diag_desc.WWNN));
|
|
|
|
memcpy(rsp_payload->port_name_diag_desc.WWPN,
|
|
|
|
vha->port_name,
|
|
|
|
sizeof(rsp_payload->port_name_diag_desc.WWPN));
|
|
|
|
|
|
|
|
/* F-Port Portname Descriptor */
|
|
|
|
rsp_payload->port_name_direct_desc.desc_tag = cpu_to_be32(0x10003);
|
|
|
|
rsp_payload->port_name_direct_desc.desc_len =
|
|
|
|
cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_direct_desc));
|
|
|
|
memcpy(rsp_payload->port_name_direct_desc.WWNN,
|
|
|
|
vha->fabric_node_name,
|
|
|
|
sizeof(rsp_payload->port_name_direct_desc.WWNN));
|
|
|
|
memcpy(rsp_payload->port_name_direct_desc.WWPN,
|
|
|
|
vha->fabric_port_name,
|
|
|
|
sizeof(rsp_payload->port_name_direct_desc.WWPN));
|
|
|
|
|
2020-02-26 22:40:16 +00:00
|
|
|
/* Buffer Credit Descriptor */
|
|
|
|
rsp_payload->buffer_credit_desc.desc_tag = cpu_to_be32(0x10006);
|
|
|
|
rsp_payload->buffer_credit_desc.desc_len =
|
|
|
|
cpu_to_be32(RDP_DESC_LEN(rsp_payload->buffer_credit_desc));
|
|
|
|
rsp_payload->buffer_credit_desc.fcport_b2b = 0;
|
|
|
|
rsp_payload->buffer_credit_desc.attached_fcport_b2b = cpu_to_be32(0);
|
|
|
|
rsp_payload->buffer_credit_desc.fcport_rtt = cpu_to_be32(0);
|
|
|
|
|
2020-09-29 10:21:47 +00:00
|
|
|
if (ha->flags.plogi_template_valid) {
|
|
|
|
uint32_t tmp =
|
|
|
|
be16_to_cpu(ha->plogi_els_payld.fl_csp.sp_bb_cred);
|
|
|
|
rsp_payload->buffer_credit_desc.fcport_b2b = cpu_to_be32(tmp);
|
2020-02-12 21:44:18 +00:00
|
|
|
}
|
|
|
|
|
2020-02-12 21:44:25 +00:00
|
|
|
if (rsp_payload_length < sizeof(*rsp_payload))
|
|
|
|
goto send;
|
|
|
|
|
2020-02-26 22:40:16 +00:00
|
|
|
/* Optical Element Descriptor, Temperature */
|
|
|
|
rsp_payload->optical_elmt_desc[0].desc_tag = cpu_to_be32(0x10007);
|
|
|
|
rsp_payload->optical_elmt_desc[0].desc_len =
|
|
|
|
cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
|
|
|
|
/* Optical Element Descriptor, Voltage */
|
|
|
|
rsp_payload->optical_elmt_desc[1].desc_tag = cpu_to_be32(0x10007);
|
|
|
|
rsp_payload->optical_elmt_desc[1].desc_len =
|
|
|
|
cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
|
|
|
|
/* Optical Element Descriptor, Tx Bias Current */
|
|
|
|
rsp_payload->optical_elmt_desc[2].desc_tag = cpu_to_be32(0x10007);
|
|
|
|
rsp_payload->optical_elmt_desc[2].desc_len =
|
|
|
|
cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
|
|
|
|
/* Optical Element Descriptor, Tx Power */
|
|
|
|
rsp_payload->optical_elmt_desc[3].desc_tag = cpu_to_be32(0x10007);
|
|
|
|
rsp_payload->optical_elmt_desc[3].desc_len =
|
|
|
|
cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
|
|
|
|
/* Optical Element Descriptor, Rx Power */
|
|
|
|
rsp_payload->optical_elmt_desc[4].desc_tag = cpu_to_be32(0x10007);
|
|
|
|
rsp_payload->optical_elmt_desc[4].desc_len =
|
|
|
|
cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
|
|
|
|
|
2020-02-12 21:44:18 +00:00
|
|
|
if (sfp) {
|
|
|
|
memset(sfp, 0, SFP_RTDI_LEN);
|
|
|
|
rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0);
|
|
|
|
if (!rval) {
|
2020-05-18 21:17:12 +00:00
|
|
|
__be16 *trx = (__force __be16 *)sfp; /* already be16 */
|
2020-02-12 21:44:18 +00:00
|
|
|
|
|
|
|
/* Optical Element Descriptor, Temperature */
|
|
|
|
rsp_payload->optical_elmt_desc[0].high_alarm = trx[0];
|
|
|
|
rsp_payload->optical_elmt_desc[0].low_alarm = trx[1];
|
|
|
|
rsp_payload->optical_elmt_desc[0].high_warn = trx[2];
|
|
|
|
rsp_payload->optical_elmt_desc[0].low_warn = trx[3];
|
|
|
|
rsp_payload->optical_elmt_desc[0].element_flags =
|
|
|
|
cpu_to_be32(1 << 28);
|
|
|
|
|
|
|
|
/* Optical Element Descriptor, Voltage */
|
|
|
|
rsp_payload->optical_elmt_desc[1].high_alarm = trx[4];
|
|
|
|
rsp_payload->optical_elmt_desc[1].low_alarm = trx[5];
|
|
|
|
rsp_payload->optical_elmt_desc[1].high_warn = trx[6];
|
|
|
|
rsp_payload->optical_elmt_desc[1].low_warn = trx[7];
|
|
|
|
rsp_payload->optical_elmt_desc[1].element_flags =
|
|
|
|
cpu_to_be32(2 << 28);
|
|
|
|
|
|
|
|
/* Optical Element Descriptor, Tx Bias Current */
|
|
|
|
rsp_payload->optical_elmt_desc[2].high_alarm = trx[8];
|
|
|
|
rsp_payload->optical_elmt_desc[2].low_alarm = trx[9];
|
|
|
|
rsp_payload->optical_elmt_desc[2].high_warn = trx[10];
|
|
|
|
rsp_payload->optical_elmt_desc[2].low_warn = trx[11];
|
|
|
|
rsp_payload->optical_elmt_desc[2].element_flags =
|
|
|
|
cpu_to_be32(3 << 28);
|
|
|
|
|
|
|
|
/* Optical Element Descriptor, Tx Power */
|
|
|
|
rsp_payload->optical_elmt_desc[3].high_alarm = trx[12];
|
|
|
|
rsp_payload->optical_elmt_desc[3].low_alarm = trx[13];
|
|
|
|
rsp_payload->optical_elmt_desc[3].high_warn = trx[14];
|
|
|
|
rsp_payload->optical_elmt_desc[3].low_warn = trx[15];
|
|
|
|
rsp_payload->optical_elmt_desc[3].element_flags =
|
|
|
|
cpu_to_be32(4 << 28);
|
|
|
|
|
|
|
|
/* Optical Element Descriptor, Rx Power */
|
|
|
|
rsp_payload->optical_elmt_desc[4].high_alarm = trx[16];
|
|
|
|
rsp_payload->optical_elmt_desc[4].low_alarm = trx[17];
|
|
|
|
rsp_payload->optical_elmt_desc[4].high_warn = trx[18];
|
|
|
|
rsp_payload->optical_elmt_desc[4].low_warn = trx[19];
|
|
|
|
rsp_payload->optical_elmt_desc[4].element_flags =
|
|
|
|
cpu_to_be32(5 << 28);
|
|
|
|
}
|
|
|
|
|
|
|
|
memset(sfp, 0, SFP_RTDI_LEN);
|
|
|
|
rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 112, 64, 0);
|
|
|
|
if (!rval) {
|
|
|
|
/* Temperature high/low alarm/warning */
|
|
|
|
rsp_payload->optical_elmt_desc[0].element_flags |=
|
|
|
|
cpu_to_be32(
|
|
|
|
(sfp[0] >> 7 & 1) << 3 |
|
|
|
|
(sfp[0] >> 6 & 1) << 2 |
|
|
|
|
(sfp[4] >> 7 & 1) << 1 |
|
|
|
|
(sfp[4] >> 6 & 1) << 0);
|
|
|
|
|
|
|
|
/* Voltage high/low alarm/warning */
|
|
|
|
rsp_payload->optical_elmt_desc[1].element_flags |=
|
|
|
|
cpu_to_be32(
|
|
|
|
(sfp[0] >> 5 & 1) << 3 |
|
|
|
|
(sfp[0] >> 4 & 1) << 2 |
|
|
|
|
(sfp[4] >> 5 & 1) << 1 |
|
|
|
|
(sfp[4] >> 4 & 1) << 0);
|
|
|
|
|
|
|
|
/* Tx Bias Current high/low alarm/warning */
|
|
|
|
rsp_payload->optical_elmt_desc[2].element_flags |=
|
|
|
|
cpu_to_be32(
|
|
|
|
(sfp[0] >> 3 & 1) << 3 |
|
|
|
|
(sfp[0] >> 2 & 1) << 2 |
|
|
|
|
(sfp[4] >> 3 & 1) << 1 |
|
|
|
|
(sfp[4] >> 2 & 1) << 0);
|
|
|
|
|
|
|
|
/* Tx Power high/low alarm/warning */
|
|
|
|
rsp_payload->optical_elmt_desc[3].element_flags |=
|
|
|
|
cpu_to_be32(
|
|
|
|
(sfp[0] >> 1 & 1) << 3 |
|
|
|
|
(sfp[0] >> 0 & 1) << 2 |
|
|
|
|
(sfp[4] >> 1 & 1) << 1 |
|
|
|
|
(sfp[4] >> 0 & 1) << 0);
|
|
|
|
|
|
|
|
/* Rx Power high/low alarm/warning */
|
|
|
|
rsp_payload->optical_elmt_desc[4].element_flags |=
|
|
|
|
cpu_to_be32(
|
|
|
|
(sfp[1] >> 7 & 1) << 3 |
|
|
|
|
(sfp[1] >> 6 & 1) << 2 |
|
|
|
|
(sfp[5] >> 7 & 1) << 1 |
|
|
|
|
(sfp[5] >> 6 & 1) << 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-02-26 22:40:16 +00:00
|
|
|
/* Optical Product Data Descriptor */
|
|
|
|
rsp_payload->optical_prod_desc.desc_tag = cpu_to_be32(0x10008);
|
|
|
|
rsp_payload->optical_prod_desc.desc_len =
|
|
|
|
cpu_to_be32(RDP_DESC_LEN(rsp_payload->optical_prod_desc));
|
|
|
|
|
2020-02-12 21:44:18 +00:00
|
|
|
if (sfp) {
|
|
|
|
memset(sfp, 0, SFP_RTDI_LEN);
|
|
|
|
rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 20, 64, 0);
|
|
|
|
if (!rval) {
|
|
|
|
memcpy(rsp_payload->optical_prod_desc.vendor_name,
|
|
|
|
sfp + 0,
|
|
|
|
sizeof(rsp_payload->optical_prod_desc.vendor_name));
|
|
|
|
memcpy(rsp_payload->optical_prod_desc.part_number,
|
|
|
|
sfp + 20,
|
|
|
|
sizeof(rsp_payload->optical_prod_desc.part_number));
|
|
|
|
memcpy(rsp_payload->optical_prod_desc.revision,
|
|
|
|
sfp + 36,
|
|
|
|
sizeof(rsp_payload->optical_prod_desc.revision));
|
|
|
|
memcpy(rsp_payload->optical_prod_desc.serial_number,
|
|
|
|
sfp + 48,
|
|
|
|
sizeof(rsp_payload->optical_prod_desc.serial_number));
|
|
|
|
}
|
|
|
|
|
|
|
|
memset(sfp, 0, SFP_RTDI_LEN);
|
|
|
|
rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 84, 8, 0);
|
|
|
|
if (!rval) {
|
|
|
|
memcpy(rsp_payload->optical_prod_desc.date,
|
|
|
|
sfp + 0,
|
|
|
|
sizeof(rsp_payload->optical_prod_desc.date));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
send:
|
|
|
|
ql_dbg(ql_dbg_init, vha, 0x0183,
|
|
|
|
"Sending ELS Response to RDP Request...\n");
|
|
|
|
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0184,
|
|
|
|
"-------- ELS RSP -------\n");
|
|
|
|
ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0185,
|
2020-05-18 21:17:09 +00:00
|
|
|
rsp_els, sizeof(*rsp_els));
|
2020-02-12 21:44:18 +00:00
|
|
|
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0186,
|
|
|
|
"-------- ELS RSP PAYLOAD -------\n");
|
|
|
|
ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0187,
|
2020-05-18 21:17:09 +00:00
|
|
|
rsp_payload, rsp_payload_length);
|
2020-02-12 21:44:18 +00:00
|
|
|
|
|
|
|
rval = qla2x00_issue_iocb(vha, rsp_els, rsp_els_dma, 0);
|
|
|
|
|
2020-02-12 21:44:23 +00:00
|
|
|
if (rval) {
|
2020-02-12 21:44:18 +00:00
|
|
|
ql_log(ql_log_warn, vha, 0x0188,
|
2020-02-12 21:44:23 +00:00
|
|
|
"%s: iocb failed to execute -> %x\n", __func__, rval);
|
|
|
|
} else if (rsp_els->comp_status) {
|
2020-02-12 21:44:18 +00:00
|
|
|
ql_log(ql_log_warn, vha, 0x0189,
|
2020-02-12 21:44:23 +00:00
|
|
|
"%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
|
|
|
|
__func__, rsp_els->comp_status,
|
|
|
|
rsp_els->error_subcode_1, rsp_els->error_subcode_2);
|
2020-02-12 21:44:18 +00:00
|
|
|
} else {
|
|
|
|
ql_dbg(ql_dbg_init, vha, 0x018a, "%s: done.\n", __func__);
|
|
|
|
}
|
|
|
|
|
|
|
|
dealloc:
|
|
|
|
if (stat)
|
|
|
|
dma_free_coherent(&ha->pdev->dev, sizeof(*stat),
|
|
|
|
stat, stat_dma);
|
|
|
|
if (sfp)
|
|
|
|
dma_free_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
|
|
|
|
sfp, sfp_dma);
|
|
|
|
if (rsp_payload)
|
|
|
|
dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
|
|
|
|
rsp_payload, rsp_payload_dma);
|
|
|
|
if (rsp_els)
|
|
|
|
dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els),
|
|
|
|
rsp_els, rsp_els_dma);
|
2020-02-12 21:44:24 +00:00
|
|
|
}
|
2020-02-12 21:44:18 +00:00
|
|
|
|
2020-06-30 10:22:28 +00:00
|
|
|
void
|
|
|
|
qla24xx_free_purex_item(struct purex_item *item)
|
|
|
|
{
|
|
|
|
if (item == &item->vha->default_item)
|
|
|
|
memset(&item->vha->default_item, 0, sizeof(struct purex_item));
|
|
|
|
else
|
|
|
|
kfree(item);
|
|
|
|
}
|
|
|
|
|
2020-02-12 21:44:24 +00:00
|
|
|
void qla24xx_process_purex_list(struct purex_list *list)
|
|
|
|
{
|
|
|
|
struct list_head head = LIST_HEAD_INIT(head);
|
|
|
|
struct purex_item *item, *next;
|
|
|
|
ulong flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&list->lock, flags);
|
|
|
|
list_splice_init(&list->head, &head);
|
|
|
|
spin_unlock_irqrestore(&list->lock, flags);
|
|
|
|
|
|
|
|
list_for_each_entry_safe(item, next, &head, list) {
|
|
|
|
list_del(&item->list);
|
2020-06-30 10:22:28 +00:00
|
|
|
item->process_item(item->vha, item);
|
|
|
|
qla24xx_free_purex_item(item);
|
2020-02-12 21:44:24 +00:00
|
|
|
}
|
2020-02-12 21:44:18 +00:00
|
|
|
}
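/*
 * Each purex item spliced off the list is dispatched through its
 * ->process_item() callback (e.g. qla24xx_process_purex_rdp() above) and
 * then released with qla24xx_free_purex_item().
 */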
|
|
|
|
|
2020-11-26 13:29:44 +00:00
|
|
|
/*
|
|
|
|
* Context: task, can sleep
|
|
|
|
*/
|
2012-08-22 18:21:03 +00:00
|
|
|
void
|
|
|
|
qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
|
|
|
|
{
|
2015-06-04 22:57:20 +00:00
|
|
|
#if 0
|
|
|
|
uint16_t options = (requester_id << 15) | BIT_7;
|
|
|
|
#endif
|
|
|
|
uint16_t retry;
|
2012-08-22 18:21:03 +00:00
|
|
|
uint32_t data;
|
|
|
|
struct qla_hw_data *ha = base_vha->hw;
|
|
|
|
|
2020-11-26 13:29:44 +00:00
|
|
|
might_sleep();
|
|
|
|
|
2012-08-22 18:21:03 +00:00
|
|
|
/* IDC-unlock implementation using driver-unlock/lock-id
|
|
|
|
* remote registers
|
|
|
|
*/
|
|
|
|
retry = 0;
|
|
|
|
retry_unlock:
|
|
|
|
if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data)
|
|
|
|
== QLA_SUCCESS) {
|
|
|
|
if (data == ha->portnum) {
|
|
|
|
qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data);
|
|
|
|
/* Clearing lock-id by setting 0xff */
|
|
|
|
qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff);
|
|
|
|
} else if (retry < 10) {
|
|
|
|
/* SV: XXX: IDC unlock retrying needed here? */
|
|
|
|
|
|
|
|
/* Retry for IDC-unlock */
|
2020-11-26 13:29:44 +00:00
|
|
|
msleep(QLA83XX_WAIT_LOGIC_MS);
|
2012-08-22 18:21:03 +00:00
|
|
|
retry++;
|
|
|
|
ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
|
2016-08-28 11:24:48 +00:00
|
|
|
"Failed to release IDC lock, retrying=%d\n", retry);
|
2012-08-22 18:21:03 +00:00
|
|
|
goto retry_unlock;
|
|
|
|
}
|
|
|
|
} else if (retry < 10) {
|
|
|
|
/* Retry for IDC-unlock */
|
2020-11-26 13:29:44 +00:00
|
|
|
msleep(QLA83XX_WAIT_LOGIC_MS);
|
2012-08-22 18:21:03 +00:00
|
|
|
retry++;
|
|
|
|
ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
|
2016-08-28 11:24:48 +00:00
|
|
|
"Failed to read drv-lockid, retrying=%d\n", retry);
|
2012-08-22 18:21:03 +00:00
|
|
|
goto retry_unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
return;
|
|
|
|
|
2015-06-04 22:57:20 +00:00
|
|
|
#if 0
|
2012-08-22 18:21:03 +00:00
|
|
|
/* XXX: IDC-unlock implementation using access-control mbx */
|
|
|
|
retry = 0;
|
|
|
|
retry_unlock2:
|
|
|
|
if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
|
|
|
|
if (retry < 10) {
|
|
|
|
/* Retry for IDC-unlock */
|
2020-11-26 13:29:44 +00:00
|
|
|
msleep(QLA83XX_WAIT_LOGIC_MS);
|
2012-08-22 18:21:03 +00:00
|
|
|
retry++;
|
|
|
|
ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
|
2016-08-28 11:24:48 +00:00
|
|
|
"Failed to release IDC lock, retrying=%d\n", retry);
|
2012-08-22 18:21:03 +00:00
|
|
|
goto retry_unlock2;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return;
|
2015-06-04 22:57:20 +00:00
|
|
|
#endif
|
2012-08-22 18:21:03 +00:00
|
|
|
}
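Stripped of the interleaved annotations, the driver-lock-id unlock path above reduces to the flow below. This is a condensed sketch of the code shown, assuming the driver's own declarations of qla83xx_rd_reg()/qla83xx_wr_reg(), the register indices, and QLA83XX_WAIT_LOGIC_MS; the function name is illustrative.

static void example_idc_unlock_flow(scsi_qla_host_t *base_vha)
{
	struct qla_hw_data *ha = base_vha->hw;
	uint32_t data;
	int retry = 0;

retry_unlock:
	if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data) == QLA_SUCCESS) {
		if (data == ha->portnum) {
			/* Lock is ours: reading DRIVER_UNLOCK releases it. */
			qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data);
			/* Clear the lock-id by setting 0xff. */
			qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff);
		} else if (retry < 10) {
			/* Lock-id names another port: pause and retry. */
			msleep(QLA83XX_WAIT_LOGIC_MS);
			retry++;
			goto retry_unlock;
		}
	} else if (retry < 10) {
		/* Could not read the lock-id register: pause and retry. */
		msleep(QLA83XX_WAIT_LOGIC_MS);
		retry++;
		goto retry_unlock;
	}
}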
|
|
|
|
|
|
|
|
int
|
|
|
|
__qla83xx_set_drv_presence(scsi_qla_host_t *vha)
|
|
|
|
{
|
|
|
|
int rval = QLA_SUCCESS;
|
|
|
|
struct qla_hw_data *ha = vha->hw;
|
|
|
|
uint32_t drv_presence;
|
|
|
|
|
|
|
|
rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
|
|
|
|
if (rval == QLA_SUCCESS) {
|
|
|
|
drv_presence |= (1 << ha->portnum);
|
|
|
|
rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
|
|
|
|
drv_presence);
|
|
|
|
}
|
|
|
|
|
|
|
|
return rval;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
qla83xx_set_drv_presence(scsi_qla_host_t *vha)
|
|
|
|
{
|
|
|
|
int rval = QLA_SUCCESS;
|
|
|
|
|
|
|
|
qla83xx_idc_lock(vha, 0);
|
|
|
|
rval = __qla83xx_set_drv_presence(vha);
|
|
|
|
qla83xx_idc_unlock(vha, 0);
|
|
|
|
|
|
|
|
return rval;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
__qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
|
|
|
|
{
|
|
|
|
int rval = QLA_SUCCESS;
|
|
|
|
struct qla_hw_data *ha = vha->hw;
|
|
|
|
uint32_t drv_presence;
|
|
|
|
|
|
|
|
rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
|
|
|
|
if (rval == QLA_SUCCESS) {
|
|
|
|
drv_presence &= ~(1 << ha->portnum);
|
|
|
|
rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
|
|
|
|
drv_presence);
|
|
|
|
}
|
|
|
|
|
|
|
|
return rval;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
|
|
|
|
{
|
|
|
|
int rval = QLA_SUCCESS;
|
|
|
|
|
|
|
|
qla83xx_idc_lock(vha, 0);
|
|
|
|
rval = __qla83xx_clear_drv_presence(vha);
|
|
|
|
qla83xx_idc_unlock(vha, 0);
|
|
|
|
|
|
|
|
return rval;
|
|
|
|
}
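A short usage note on the pairing above: the double-underscore variants perform the read-modify-write of QLA83XX_IDC_DRV_PRESENCE and assume the IDC lock is already held, while qla83xx_set_drv_presence()/qla83xx_clear_drv_presence() are thin wrappers that take and drop the lock around them. The helper below is a hypothetical caller illustrating when to use which; it is not part of the driver.

static int example_toggle_drv_presence(scsi_qla_host_t *vha)
{
	int rval;

	/* Not holding the IDC lock: let the wrapper take it. */
	rval = qla83xx_set_drv_presence(vha);
	if (rval != QLA_SUCCESS)
		return rval;

	/* Already inside an IDC-locked region: use the __ variant. */
	qla83xx_idc_lock(vha, 0);
	rval = __qla83xx_clear_drv_presence(vha);
	qla83xx_idc_unlock(vha, 0);

	return rval;
}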
|
|
|
|
|
2012-11-21 07:40:29 +00:00
|
|
|
static void
|
2012-08-22 18:21:03 +00:00
|
|
|
qla83xx_need_reset_handler(scsi_qla_host_t *vha)
|
|
|
|
{
|
|
|
|
struct qla_hw_data *ha = vha->hw;
|
|
|
|
uint32_t drv_ack, drv_presence;
|
|
|
|
unsigned long ack_timeout;
|
|
|
|
|
|
|
|
/* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */
|
|
|
|
ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
|
|
|
|
while (1) {
|
|
|
|
qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
|
|
|
|
qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
|
2012-11-21 07:40:36 +00:00
|
|
|
if ((drv_ack & drv_presence) == drv_presence)
|
2012-08-22 18:21:03 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
if (time_after_eq(jiffies, ack_timeout)) {
|
|
|
|
ql_log(ql_log_warn, vha, 0xb067,
|
|
|
|
"RESET ACK TIMEOUT! drv_presence=0x%x "
|
|
|
|
"drv_ack=0x%x\n", drv_presence, drv_ack);
|
|
|
|
/*
|
|
|
|
* The function(s) which did not ack in time are forced
|
|
|
|
* to withdraw any further participation in the IDC
|
|
|
|
* reset.
|
|
|
|
*/
|
|
|
|
if (drv_ack != drv_presence)
|
|
|
|
qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
|
|
|
|
drv_ack);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
qla83xx_idc_unlock(vha, 0);
|
|
|
|
msleep(1000);
|
|
|
|
qla83xx_idc_lock(vha, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD);
|
|
|
|
ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n");
|
|
|
|
}
|
|
|
|
|
2012-11-21 07:40:29 +00:00
|
|
|
static int
|
2012-08-22 18:21:03 +00:00
|
|
|
qla83xx_device_bootstrap(scsi_qla_host_t *vha)
|
|
|
|
{
|
|
|
|
int rval = QLA_SUCCESS;
|
|
|
|
uint32_t idc_control;
|
|
|
|
|
|
|
|
qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
|
|
|
|
ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n");
|
|
|
|
|
|
|
|
/* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */
|
|
|
|
__qla83xx_get_idc_control(vha, &idc_control);
|
|
|
|
idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET;
|
|
|
|
__qla83xx_set_idc_control(vha, 0);
|
|
|
|
|
|
|
|
qla83xx_idc_unlock(vha, 0);
|
|
|
|
rval = qla83xx_restart_nic_firmware(vha);
|
|
|
|
qla83xx_idc_lock(vha, 0);
|
|
|
|
|
|
|
|
if (rval != QLA_SUCCESS) {
|
|
|
|
ql_log(ql_log_fatal, vha, 0xb06a,
|
|
|
|
"Failed to restart NIC f/w.\n");
|
|
|
|
qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED);
|
|
|
|
ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n");
|
|
|
|
} else {
|
|
|
|
ql_dbg(ql_dbg_p3p, vha, 0xb06c,
|
|
|
|
"Success in restarting nic f/w.\n");
|
|
|
|
qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY);
|
|
|
|
ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
return rval;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Assumes idc_lock always held on entry */
|
|
|
|
int
|
|
|
|
qla83xx_idc_state_handler(scsi_qla_host_t *base_vha)
|
|
|
|
{
|
|
|
|
struct qla_hw_data *ha = base_vha->hw;
|
|
|
|
int rval = QLA_SUCCESS;
|
|
|
|
unsigned long dev_init_timeout;
|
|
|
|
uint32_t dev_state;
|
|
|
|
|
|
|
|
/* Wait for MAX-INIT-TIMEOUT for the device to go ready */
|
|
|
|
dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
|
|
|
|
|
|
|
|
while (1) {
|
|
|
|
|
|
|
|
if (time_after_eq(jiffies, dev_init_timeout)) {
|
|
|
|
ql_log(ql_log_warn, base_vha, 0xb06e,
|
|
|
|
"Initialization TIMEOUT!\n");
|
|
|
|
/* Init timeout. Disable further NIC Core
|
|
|
|
* communication.
|
|
|
|
*/
|
|
|
|
qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
|
|
|
|
QLA8XXX_DEV_FAILED);
|
|
|
|
ql_log(ql_log_info, base_vha, 0xb06f,
|
|
|
|
"HW State: FAILED.\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
|
|
|
|
switch (dev_state) {
|
|
|
|
case QLA8XXX_DEV_READY:
|
|
|
|
if (ha->flags.nic_core_reset_owner)
|
|
|
|
qla83xx_idc_audit(base_vha,
|
|
|
|
IDC_AUDIT_COMPLETION);
|
|
|
|
ha->flags.nic_core_reset_owner = 0;
|
|
|
|
ql_dbg(ql_dbg_p3p, base_vha, 0xb070,
|
|
|
|
"Reset_owner reset by 0x%x.\n",
|
|
|
|
ha->portnum);
|
|
|
|
goto exit;
|
|
|
|
case QLA8XXX_DEV_COLD:
|
|
|
|
if (ha->flags.nic_core_reset_owner)
|
|
|
|
rval = qla83xx_device_bootstrap(base_vha);
|
|
|
|
else {
|
|
|
|
/* Wait for AEN to change device-state */
|
|
|
|
qla83xx_idc_unlock(base_vha, 0);
|
|
|
|
msleep(1000);
|
|
|
|
qla83xx_idc_lock(base_vha, 0);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case QLA8XXX_DEV_INITIALIZING:
|
|
|
|
/* Wait for AEN to change device-state */
|
|
|
|
qla83xx_idc_unlock(base_vha, 0);
|
|
|
|
msleep(1000);
|
|
|
|
qla83xx_idc_lock(base_vha, 0);
|
|
|
|
break;
|
|
|
|
case QLA8XXX_DEV_NEED_RESET:
|
|
|
|
if (!ql2xdontresethba && ha->flags.nic_core_reset_owner)
|
|
|
|
qla83xx_need_reset_handler(base_vha);
|
|
|
|
else {
|
|
|
|
/* Wait for AEN to change device-state */
|
|
|
|
qla83xx_idc_unlock(base_vha, 0);
|
|
|
|
msleep(1000);
|
|
|
|
qla83xx_idc_lock(base_vha, 0);
|
|
|
|
}
|
|
|
|
/* reset timeout value after need reset handler */
|
|
|
|
dev_init_timeout = jiffies +
|
|
|
|
(ha->fcoe_dev_init_timeout * HZ);
|
|
|
|
break;
|
|
|
|
case QLA8XXX_DEV_NEED_QUIESCENT:
|
|
|
|
/* XXX: DEBUG for now */
|
|
|
|
qla83xx_idc_unlock(base_vha, 0);
|
|
|
|
msleep(1000);
|
|
|
|
qla83xx_idc_lock(base_vha, 0);
|
|
|
|
break;
|
|
|
|
case QLA8XXX_DEV_QUIESCENT:
|
|
|
|
/* XXX: DEBUG for now */
|
|
|
|
if (ha->flags.quiesce_owner)
|
|
|
|
goto exit;
|
|
|
|
|
|
|
|
qla83xx_idc_unlock(base_vha, 0);
|
|
|
|
msleep(1000);
|
|
|
|
qla83xx_idc_lock(base_vha, 0);
|
|
|
|
dev_init_timeout = jiffies +
|
|
|
|
(ha->fcoe_dev_init_timeout * HZ);
|
|
|
|
break;
|
|
|
|
case QLA8XXX_DEV_FAILED:
|
|
|
|
if (ha->flags.nic_core_reset_owner)
|
|
|
|
qla83xx_idc_audit(base_vha,
|
|
|
|
IDC_AUDIT_COMPLETION);
|
|
|
|
ha->flags.nic_core_reset_owner = 0;
|
|
|
|
__qla83xx_clear_drv_presence(base_vha);
|
|
|
|
qla83xx_idc_unlock(base_vha, 0);
|
|
|
|
qla8xxx_dev_failed_handler(base_vha);
|
|
|
|
rval = QLA_FUNCTION_FAILED;
|
|
|
|
qla83xx_idc_lock(base_vha, 0);
|
|
|
|
goto exit;
|
|
|
|
case QLA8XXX_BAD_VALUE:
|
|
|
|
qla83xx_idc_unlock(base_vha, 0);
|
|
|
|
msleep(1000);
|
|
|
|
qla83xx_idc_lock(base_vha, 0);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
ql_log(ql_log_warn, base_vha, 0xb071,
|
2015-02-27 14:52:31 +00:00
|
|
|
"Unknown Device State: %x.\n", dev_state);
|
2012-08-22 18:21:03 +00:00
|
|
|
qla83xx_idc_unlock(base_vha, 0);
|
|
|
|
qla8xxx_dev_failed_handler(base_vha);
|
|
|
|
rval = QLA_FUNCTION_FAILED;
|
|
|
|
qla83xx_idc_lock(base_vha, 0);
|
|
|
|
goto exit;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
exit:
|
|
|
|
return rval;
|
|
|
|
}
|
|
|
|
|
2013-10-30 07:38:16 +00:00
|
|
|
void
|
|
|
|
qla2x00_disable_board_on_pci_error(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct qla_hw_data *ha = container_of(work, struct qla_hw_data,
|
|
|
|
board_disable);
|
|
|
|
struct pci_dev *pdev = ha->pdev;
|
|
|
|
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
|
|
|
|
|
|
|
|
ql_log(ql_log_warn, base_vha, 0x015b,
|
|
|
|
"Disabling adapter.\n");
|
|
|
|
|
2017-08-23 22:05:00 +00:00
|
|
|
if (!atomic_read(&pdev->enable_cnt)) {
|
|
|
|
ql_log(ql_log_info, base_vha, 0xfffc,
|
|
|
|
"PCI device disabled, no action req for PCI error=%lx\n",
|
|
|
|
base_vha->pci_flags);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2020-04-21 20:46:20 +00:00
|
|
|
/*
|
|
|
|
* if UNLOADING flag is already set, then continue unload,
|
|
|
|
* where it was set first.
|
|
|
|
*/
|
|
|
|
if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
|
|
|
|
return;
|
2017-01-20 06:28:00 +00:00
|
|
|
|
2020-04-21 20:46:20 +00:00
|
|
|
qla2x00_wait_for_sess_deletion(base_vha);
|
2013-10-30 07:38:16 +00:00
|
|
|
|
|
|
|
qla2x00_delete_all_vps(ha, base_vha);
|
|
|
|
|
|
|
|
qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
|
|
|
|
|
|
|
|
qla2x00_dfs_remove(base_vha);
|
|
|
|
|
|
|
|
qla84xx_put_chip(base_vha);
|
|
|
|
|
|
|
|
if (base_vha->timer_active)
|
|
|
|
qla2x00_stop_timer(base_vha);
|
|
|
|
|
|
|
|
base_vha->flags.online = 0;
|
|
|
|
|
|
|
|
qla2x00_destroy_deferred_work(ha);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Do not try to stop beacon blink as it will issue a mailbox
|
|
|
|
* command.
|
|
|
|
*/
|
|
|
|
qla2x00_free_sysfs_attr(base_vha, false);
|
|
|
|
|
|
|
|
fc_remove_host(base_vha->host);
|
|
|
|
|
|
|
|
scsi_remove_host(base_vha->host);
|
|
|
|
|
|
|
|
base_vha->flags.init_done = 0;
|
|
|
|
qla25xx_delete_queues(base_vha);
|
|
|
|
qla2x00_free_fcports(base_vha);
|
2016-12-12 22:40:09 +00:00
|
|
|
qla2x00_free_irqs(base_vha);
|
2013-10-30 07:38:16 +00:00
|
|
|
qla2x00_mem_free(ha);
|
|
|
|
qla82xx_md_free(base_vha);
|
|
|
|
qla2x00_free_queues(ha);
|
|
|
|
|
|
|
|
qla2x00_unmap_iobases(ha);
|
|
|
|
|
|
|
|
pci_release_selected_regions(ha->pdev, ha->bars);
|
|
|
|
pci_disable_pcie_error_reporting(pdev);
|
|
|
|
pci_disable_device(pdev);
|
|
|
|
|
2014-08-26 21:12:14 +00:00
|
|
|
/*
|
|
|
|
* Let qla2x00_remove_one cleanup qla_hw_data on device removal.
|
|
|
|
*/
|
2013-10-30 07:38:16 +00:00
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/**************************************************************************
|
|
|
|
* qla2x00_do_dpc
|
|
|
|
 * This kernel thread is a task that is scheduled by the interrupt handler
|
|
|
|
* to perform the background processing for interrupts.
|
|
|
|
*
|
|
|
|
* Notes:
|
|
|
|
 * This task always runs in the context of a kernel thread. It
|
|
|
|
 * is kicked off by the driver's detect code and starts
|
|
|
|
* up one per adapter. It immediately goes to sleep and waits for
|
|
|
|
* some fibre event. When either the interrupt handler or
|
|
|
|
 * the timer routine detects an event it will set one of the task
|
|
|
|
 * bits and then wake us up.
|
|
|
|
**************************************************************************/
|
|
|
|
static int
|
|
|
|
qla2x00_do_dpc(void *data)
|
|
|
|
{
|
2008-11-06 18:40:51 +00:00
|
|
|
scsi_qla_host_t *base_vha;
|
|
|
|
struct qla_hw_data *ha;
|
2016-12-12 22:40:07 +00:00
|
|
|
uint32_t online;
|
|
|
|
struct qla_qpair *qpair;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-11-06 18:40:51 +00:00
|
|
|
ha = (struct qla_hw_data *)data;
|
|
|
|
base_vha = pci_get_drvdata(ha->pdev);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2014-03-11 10:09:12 +00:00
|
|
|
set_user_nice(current, MIN_NICE);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-01-27 21:12:37 +00:00
|
|
|
set_current_state(TASK_INTERRUPTIBLE);
|
2006-02-14 17:46:22 +00:00
|
|
|
while (!kthread_should_stop()) {
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg(ql_dbg_dpc, base_vha, 0x4000,
|
|
|
|
"DPC handler sleeping.\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-02-14 17:46:22 +00:00
|
|
|
schedule();
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2021-03-29 08:52:25 +00:00
|
|
|
if (test_and_clear_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags))
|
|
|
|
qla_pci_set_eeh_busy(base_vha);
|
|
|
|
|
2011-11-18 17:03:10 +00:00
|
|
|
if (!base_vha->flags.init_done || ha->flags.mbox_busy)
|
|
|
|
goto end_loop;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2009-12-16 05:29:46 +00:00
|
|
|
if (ha->flags.eeh_busy) {
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
|
|
|
|
"eeh_busy=%d.\n", ha->flags.eeh_busy);
|
2011-11-18 17:03:10 +00:00
|
|
|
goto end_loop;
|
2009-12-16 05:29:46 +00:00
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
ha->dpc_active = 1;
|
|
|
|
|
2012-05-15 18:34:15 +00:00
|
|
|
ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
|
|
|
|
"DPC handler waking up, dpc_flags=0x%lx.\n",
|
|
|
|
base_vha->dpc_flags);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2016-07-06 15:14:19 +00:00
|
|
|
if (test_bit(UNLOADING, &base_vha->dpc_flags))
|
|
|
|
break;
|
|
|
|
|
2013-08-27 05:37:28 +00:00
|
|
|
if (IS_P3P_TYPE(ha)) {
|
|
|
|
if (IS_QLA8044(ha)) {
|
|
|
|
if (test_and_clear_bit(ISP_UNRECOVERABLE,
|
|
|
|
&base_vha->dpc_flags)) {
|
|
|
|
qla8044_idc_lock(ha);
|
|
|
|
qla8044_wr_direct(base_vha,
|
|
|
|
QLA8044_CRB_DEV_STATE_INDEX,
|
|
|
|
QLA8XXX_DEV_FAILED);
|
|
|
|
qla8044_idc_unlock(ha);
|
|
|
|
ql_log(ql_log_info, base_vha, 0x4004,
|
|
|
|
"HW State: FAILED.\n");
|
|
|
|
qla8044_device_state_handler(base_vha);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
} else {
|
|
|
|
if (test_and_clear_bit(ISP_UNRECOVERABLE,
|
|
|
|
&base_vha->dpc_flags)) {
|
|
|
|
qla82xx_idc_lock(ha);
|
|
|
|
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
|
|
|
|
QLA8XXX_DEV_FAILED);
|
|
|
|
qla82xx_idc_unlock(ha);
|
|
|
|
ql_log(ql_log_info, base_vha, 0x0151,
|
|
|
|
"HW State: FAILED.\n");
|
|
|
|
qla82xx_device_state_handler(base_vha);
|
|
|
|
continue;
|
|
|
|
}
|
2010-04-13 00:59:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
|
|
|
|
&base_vha->dpc_flags)) {
|
|
|
|
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg(ql_dbg_dpc, base_vha, 0x4005,
|
|
|
|
"FCoE context reset scheduled.\n");
|
2010-04-13 00:59:55 +00:00
|
|
|
if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
|
|
|
|
&base_vha->dpc_flags))) {
|
|
|
|
if (qla82xx_fcoe_ctx_reset(base_vha)) {
|
|
|
|
/* FCoE-ctx reset failed.
|
|
|
|
* Escalate to chip-reset
|
|
|
|
*/
|
|
|
|
set_bit(ISP_ABORT_NEEDED,
|
|
|
|
&base_vha->dpc_flags);
|
|
|
|
}
|
|
|
|
clear_bit(ABORT_ISP_ACTIVE,
|
|
|
|
&base_vha->dpc_flags);
|
|
|
|
}
|
|
|
|
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
|
|
|
|
"FCoE context reset end.\n");
|
2010-04-13 00:59:55 +00:00
|
|
|
}
|
2013-03-28 12:21:23 +00:00
|
|
|
} else if (IS_QLAFX00(ha)) {
|
|
|
|
if (test_and_clear_bit(ISP_UNRECOVERABLE,
|
|
|
|
&base_vha->dpc_flags)) {
|
|
|
|
ql_dbg(ql_dbg_dpc, base_vha, 0x4020,
|
|
|
|
"Firmware Reset Recovery\n");
|
|
|
|
if (qlafx00_reset_initialize(base_vha)) {
|
|
|
|
/* Failed. Abort isp later. */
|
|
|
|
if (!test_bit(UNLOADING,
|
2014-05-05 09:47:57 +00:00
|
|
|
&base_vha->dpc_flags)) {
|
2013-03-28 12:21:23 +00:00
|
|
|
set_bit(ISP_UNRECOVERABLE,
|
|
|
|
&base_vha->dpc_flags);
|
|
|
|
ql_dbg(ql_dbg_dpc, base_vha,
|
|
|
|
0x4021,
|
|
|
|
"Reset Recovery Failed\n");
|
2014-05-05 09:47:57 +00:00
|
|
|
}
|
2013-03-28 12:21:23 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (test_and_clear_bit(FX00_TARGET_SCAN,
|
|
|
|
&base_vha->dpc_flags)) {
|
|
|
|
ql_dbg(ql_dbg_dpc, base_vha, 0x4022,
|
|
|
|
"ISPFx00 Target Scan scheduled\n");
|
|
|
|
if (qlafx00_rescan_isp(base_vha)) {
|
|
|
|
if (!test_bit(UNLOADING,
|
|
|
|
&base_vha->dpc_flags))
|
|
|
|
set_bit(ISP_UNRECOVERABLE,
|
|
|
|
&base_vha->dpc_flags);
|
|
|
|
ql_dbg(ql_dbg_dpc, base_vha, 0x401e,
|
|
|
|
"ISPFx00 Target Scan Failed\n");
|
|
|
|
}
|
|
|
|
ql_dbg(ql_dbg_dpc, base_vha, 0x401f,
|
|
|
|
"ISPFx00 Target Scan End\n");
|
|
|
|
}
|
2013-10-30 07:38:17 +00:00
|
|
|
if (test_and_clear_bit(FX00_HOST_INFO_RESEND,
|
|
|
|
&base_vha->dpc_flags)) {
|
|
|
|
ql_dbg(ql_dbg_dpc, base_vha, 0x4023,
|
|
|
|
"ISPFx00 Host Info resend scheduled\n");
|
|
|
|
qlafx00_fx_disc(base_vha,
|
|
|
|
&base_vha->hw->mr.fcport,
|
|
|
|
FXDISC_REG_HOST_INFO);
|
|
|
|
}
|
2010-04-13 00:59:55 +00:00
|
|
|
}
|
|
|
|
|
2017-08-23 22:05:07 +00:00
|
|
|
if (test_and_clear_bit(DETECT_SFP_CHANGE,
|
2020-02-26 22:40:13 +00:00
|
|
|
&base_vha->dpc_flags)) {
|
|
|
|
/* Semantic:
|
|
|
|
* - NO-OP -- await next ISP-ABORT. Preferred method
|
|
|
|
* to minimize disruptions that will occur
|
|
|
|
* when a forced chip-reset occurs.
|
|
|
|
* - Force -- ISP-ABORT scheduled.
|
|
|
|
*/
|
|
|
|
/* set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); */
|
2017-08-23 22:05:07 +00:00
|
|
|
}
|
|
|
|
|
2018-07-18 21:29:54 +00:00
|
|
|
if (test_and_clear_bit
|
|
|
|
(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
|
|
|
|
!test_bit(UNLOADING, &base_vha->dpc_flags)) {
|
2018-08-31 18:24:37 +00:00
|
|
|
bool do_reset = true;
|
|
|
|
|
2018-09-11 17:18:18 +00:00
|
|
|
switch (base_vha->qlini_mode) {
|
2018-08-31 18:24:37 +00:00
|
|
|
case QLA2XXX_INI_MODE_ENABLED:
|
|
|
|
break;
|
|
|
|
case QLA2XXX_INI_MODE_DISABLED:
|
2018-09-11 17:18:18 +00:00
|
|
|
if (!qla_tgt_mode_enabled(base_vha) &&
|
|
|
|
!ha->flags.fw_started)
|
2018-08-31 18:24:37 +00:00
|
|
|
do_reset = false;
|
|
|
|
break;
|
|
|
|
case QLA2XXX_INI_MODE_DUAL:
|
2018-09-11 17:18:18 +00:00
|
|
|
if (!qla_dual_mode_enabled(base_vha) &&
|
|
|
|
!ha->flags.fw_started)
|
2018-08-31 18:24:37 +00:00
|
|
|
do_reset = false;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2018-08-31 18:24:37 +00:00
|
|
|
if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE,
|
2008-11-06 18:40:51 +00:00
|
|
|
&base_vha->dpc_flags))) {
|
scsi: qla2xxx: Fix warning after FC target reset
Currently, FC target reset finishes with the warning message:
[84010.596893] ------------[ cut here ]------------
[84010.596917] WARNING: CPU: 238 PID: 279973 at ../drivers/scsi/qla2xxx/qla_target.c:6644 qlt_enable_vha+0x1d0/0x260 [qla2xxx]
[84010.596918] Modules linked in: vrf af_packet 8021q garp mrp stp llc netlink_diag target_tatlin_tblock(OEX) dm_ec(OEX) ttln_rdma(OEX) dm_frontend(OEX) nvme_rdma nvmet tcm_qla2xxx iscsi_target_mod target_core_mod at24 nvmem_core pnv_php ipmi_watchdog ipmi_ssif vmx_crypto gf128mul crct10dif_vpmsum qla2xxx rpcrdma nvme_fc powernv_flash(X) nvme_fabrics uio_pdrv_genirq mtd rtc_opal(X) ibmpowernv(X) opal_prd(X) uio scsi_transport_fc i2c_opal(X) ses enclosure ipmi_poweroff ast i2c_algo_bit ttm bmc_mcu(OEX) drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops drm drm_panel_orientation_quirks agpgart nfsd auth_rpcgss nfs_acl ipmi_powernv(X) lockd ipmi_devintf ipmi_msghandler grace dummy ext4 crc16 jbd2 mbcache sd_mod rdma_ucm ib_iser rdma_cm ib_umad iw_cm ib_ipoib libiscsi scsi_transport_iscsi ib_cm
[84010.596975] configfs mlx5_ib ib_uverbs ib_core mlx5_core crc32c_vpmsum xhci_pci xhci_hcd mpt3sas(OEX) tg3 usbcore mlxfw tls raid_class libphy scsi_transport_sas devlink ptp pps_core nvme nvme_core sunrpc dm_mirror dm_region_hash dm_log sg dm_multipath dm_mod scsi_dh_rdac scsi_dh_emc scsi_dh_alua scsi_mod autofs4
[84010.597001] Supported: Yes, External
[84010.597004] CPU: 238 PID: 279973 Comm: bash Tainted: G OE 4.12.14-197.29-default #1 SLE15-SP1
[84010.597006] task: c000000a104c0000 task.stack: c000000b52188000
[84010.597007] NIP: d00000001ffd7f78 LR: d00000001ffd7f6c CTR: c0000000001676c0
[84010.597008] REGS: c000000b5218b910 TRAP: 0700 Tainted: G OE (4.12.14-197.29-default)
[84010.597008] MSR: 900000010282b033 <SF,HV,VEC,VSX,EE,FP,ME,IR,DR,RI,LE,TM[E]>
[84010.597015] CR: 48242424 XER: 00000000
[84010.597016] CFAR: d00000001ff45d08 SOFTE: 1
GPR00: d00000001ffd7f6c c000000b5218bb90 d00000002001b228 0000000000000102
GPR04: 0000000000000001 0000000000000001 00013d91ed0a5e2d 0000000000000000
GPR08: c000000007793300 0000000000000000 0000000000000000 c000000a086e7818
GPR12: 0000000000002200 c000000007793300 0000000000000000 000000012bc937c0
GPR16: 000000012bbf7ed0 0000000000000000 000000012bc3dd10 0000000000000000
GPR20: 000000012bc4db28 0000010036442810 000000012bc97828 000000012bc96c70
GPR24: 00000100365b1550 0000000000000000 00000100363f3d80 c000000be20d3080
GPR28: c000000bda7eae00 c000000be20db7e8 c000000be20d3778 c000000be20db7e8
[84010.597042] NIP [d00000001ffd7f78] qlt_enable_vha+0x1d0/0x260 [qla2xxx]
[84010.597051] LR [d00000001ffd7f6c] qlt_enable_vha+0x1c4/0x260 [qla2xxx]
[84010.597051] Call Trace:
[84010.597061] [c000000b5218bb90] [d00000001ffd7f6c] qlt_enable_vha+0x1c4/0x260 [qla2xxx] (unreliable)
[84010.597064] [c000000b5218bc20] [d000000009820b6c] tcm_qla2xxx_tpg_enable_store+0xc4/0x130 [tcm_qla2xxx]
[84010.597067] [c000000b5218bcb0] [d0000000185d0e68] configfs_write_file+0xd0/0x190 [configfs]
[84010.597072] [c000000b5218bd00] [c0000000003d0edc] __vfs_write+0x3c/0x1e0
[84010.597074] [c000000b5218bd90] [c0000000003d2ea8] vfs_write+0xd8/0x220
[84010.597076] [c000000b5218bde0] [c0000000003d4ddc] SyS_write+0x6c/0x110
[84010.597079] [c000000b5218be30] [c00000000000b188] system_call+0x3c/0x130
[84010.597080] Instruction dump:
[84010.597082] 7d0050a8 7d084b78 7d0051ad 40c2fff4 7fa3eb78 4bf73965 60000000 7fa3eb78
[84010.597086] 4bf6dcd9 60000000 2fa30000 419eff40 <0fe00000> 4bffff38 e95f0058 a12a0180
[84010.597090] ---[ end trace e32abaf6e6fee826 ]---
To reproduce:
echo 0x7fffffff > /sys/module/qla2xxx/parameters/logging
modprobe target_core_mod
modprobe tcm_qla2xxx
mkdir /sys/kernel/config/target/qla2xxx
mkdir /sys/kernel/config/target/qla2xxx/<port-name>
mkdir /sys/kernel/config/target/qla2xxx/<port-name>/tpgt_1
echo 1 > /sys/kernel/config/target/qla2xxx/<port-name>/tpgt_1/enable
echo 0 > /sys/kernel/config/target/qla2xxx/<port-name>/tpgt_1/enable
echo 1 > /sys/kernel/config/target/qla2xxx/<port-name>/tpgt_1/enable
SYSTEM START
kernel: pid 327:drivers/scsi/qla2xxx/qla_init.c:2174 qla2x00_initialize_adapter(): vha->flags.online 0x0
<...>
kernel: pid 327:drivers/scsi/qla2xxx/qla_os.c:3444 qla2x00_probe_one(): vha->flags.online 0x1
echo 1 > /sys/kernel/config/target/qla2xxx/21:00:00:24:ff:86:a6:2a/tpgt_1/enable
kernel: pid 348:drivers/scsi/qla2xxx/qla_init.c:6641 qla2x00_abort_isp_cleanup(): vha->flags.online 0x0, ISP_ABORT_NEEDED 0x0
<...>
kernel: pid 348:drivers/scsi/qla2xxx/qla_init.c:6998 qla2x00_restart_isp(): vha->flags.online 0x0
echo 0 > /sys/kernel/config/target/qla2xxx/21:00:00:24:ff:86:a6:2a/tpgt_1/enable
kernel: pid 348:drivers/scsi/qla2xxx/qla_init.c:6641 qla2x00_abort_isp_cleanup(): vha->flags.online 0x0, ISP_ABORT_NEEDED 0x0
<...>
kernel: pid 1404:drivers/scsi/qla2xxx/qla_os.c:1107 qla2x00_wait_for_hba_online(): base_vha->flags.online 0x0
echo 1 > /sys/kernel/config/target/qla2xxx/21:00:00:24:ff:86:a6:2a/tpgt_1/enable
kernel: pid 1404:drivers/scsi/qla2xxx/qla_os.c:1107 qla2x00_wait_for_hba_online(): base_vha->flags.online 0x0
kernel: -----------[ cut here ]-----------
kernel: WARNING: CPU: 1 PID: 1404 at drivers/scsi/qla2xxx/qla_target.c:6654 qlt_enable_vha+0x1e0/0x280 [qla2xxx]
The issue happens because no real ISP reset is executed. The
qla2x00_abort_isp(scsi_qla_host_t *vha) function expects
vha->flags.online to be nonzero for the ISP reset procedure. This patch
sets vha->flags.online to 1 before calling ->abort_isp() so that the
ISP reset actually starts.
Link: https://lore.kernel.org/r/1d7b21bf9f7676643239eb3d60eaca7cfa505cf0.camel@yadro.com
Reviewed-by: Roman Bolshakov <r.bolshakov@yadro.com>
Signed-off-by: Viacheslav Dubeyko <v.dubeiko@yadro.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2020-04-10 08:07:08 +00:00
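/*
 * Condensed view of the fix described above (annotations stripped):
 * the commit message says qla2x00_abort_isp() expects vha->flags.online
 * to be nonzero, so the DPC thread forces the flag on before invoking
 * the ->abort_isp() hook, and falls back to rescheduling on failure:
 *
 *	base_vha->flags.online = 1;
 *	if (ha->isp_ops->abort_isp(base_vha))
 *		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
 *	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
 */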
|
|
|
base_vha->flags.online = 1;
|
2018-08-31 18:24:37 +00:00
|
|
|
ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
|
|
|
|
"ISP abort scheduled.\n");
|
2010-04-13 00:59:55 +00:00
|
|
|
if (ha->isp_ops->abort_isp(base_vha)) {
|
2005-04-16 22:20:36 +00:00
|
|
|
/* failed. retry later */
|
|
|
|
set_bit(ISP_ABORT_NEEDED,
|
2008-11-06 18:40:51 +00:00
|
|
|
&base_vha->dpc_flags);
|
2008-01-31 20:33:51 +00:00
|
|
|
}
|
2008-11-06 18:40:51 +00:00
|
|
|
clear_bit(ABORT_ISP_ACTIVE,
|
|
|
|
&base_vha->dpc_flags);
|
2018-08-31 18:24:37 +00:00
|
|
|
ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
|
|
|
|
"ISP abort end.\n");
|
2008-01-31 20:33:51 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2020-02-12 21:44:24 +00:00
|
|
|
if (test_bit(PROCESS_PUREX_IOCB, &base_vha->dpc_flags)) {
|
|
|
|
if (atomic_read(&base_vha->loop_state) == LOOP_READY) {
|
|
|
|
qla24xx_process_purex_list
|
|
|
|
(&base_vha->purex_list);
|
|
|
|
clear_bit(PROCESS_PUREX_IOCB,
|
|
|
|
&base_vha->dpc_flags);
|
|
|
|
}
|
2020-02-12 21:44:18 +00:00
|
|
|
}
|
|
|
|
|
[SCSI] qla2xxx: Test and clear FCPORT_UPDATE_NEEDED atomically.
When the qla2xxx driver loses access to multiple remote ports, a race
condition can occur which keeps requests stuck on a SCSI request
queue indefinitely.
This bad state occurred due to a race condition between how the FCPORT_UPDATE_NEEDED
bit is set in qla2x00_schedule_rport_del(), and how it is cleared in
qla2x00_do_dpc(). The problem port has its drport pointer set, but it has never
been processed by the driver to inform the fc transport that the port has been
lost. qla2x00_schedule_rport_del() sets drport, and then sets the
FCPORT_UPDATE_NEEDED bit. In qla2x00_do_dpc(), the port lists are walked and
any drport pointer is handled and the fc transport informed of the port loss,
then the FCPORT_UPDATE_NEEDED bit is cleared. This leaves a race where the
dpc thread is processing one port removal, another port removal is marked
with a call to qla2x00_schedule_rport_del(), and the dpc thread clears the
bit for both removals, even though only the first removal was actually
handled. Until another event occurs to set FCPORT_UPDATE_NEEDED, the later
port removal is never finished and qla2xxx stays in a bad state which causes
requests to become stuck on request queues.
This patch updates the driver to test and clear FCPORT_UPDATE_NEEDED
atomically. This ensures the port state changes are processed and not lost.
Signed-off-by: David Jeffery <djeffery@redhat.com>
Signed-off-by: Chad Dupuis <chad.dupuis@qlogic.com>
Cc: stable@vger.kernel.org
Signed-off-by: Saurav Kashyap <saurav.kashyap@qlogic.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
2012-11-21 07:39:54 +00:00
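/*
 * Sketch of the race described above (illustrative, not the verbatim
 * pre-patch code). The racy shape separated the test from the clear:
 *
 *	if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
 *		qla2x00_update_fcports(base_vha);
 *		clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
 *	}
 *
 * A qla2x00_schedule_rport_del() that sets the bit between the update
 * and the clear_bit() is silently lost. The code below collapses both
 * steps into one atomic test_and_clear_bit(), so a bit set after the
 * check is seen on the next DPC pass.
 */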
|
|
|
if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
|
|
|
|
&base_vha->dpc_flags)) {
|
2008-11-06 18:40:51 +00:00
|
|
|
qla2x00_update_fcports(base_vha);
|
2008-07-24 15:31:49 +00:00
|
|
|
}
|
2006-01-20 22:53:13 +00:00
|
|
|
|
2013-03-28 12:21:23 +00:00
|
|
|
if (IS_QLAFX00(ha))
|
|
|
|
goto loop_resync_check;
|
|
|
|
|
2010-12-22 00:00:14 +00:00
|
|
|
if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
|
|
|
|
"Quiescence mode scheduled.\n");
|
2013-08-27 05:37:28 +00:00
|
|
|
if (IS_P3P_TYPE(ha)) {
|
|
|
|
if (IS_QLA82XX(ha))
|
|
|
|
qla82xx_device_state_handler(base_vha);
|
|
|
|
if (IS_QLA8044(ha))
|
|
|
|
qla8044_device_state_handler(base_vha);
|
2012-08-22 18:21:06 +00:00
|
|
|
clear_bit(ISP_QUIESCE_NEEDED,
|
|
|
|
&base_vha->dpc_flags);
|
|
|
|
if (!ha->flags.quiesce_owner) {
|
|
|
|
qla2x00_perform_loop_resync(base_vha);
|
2013-08-27 05:37:28 +00:00
|
|
|
if (IS_QLA82XX(ha)) {
|
|
|
|
qla82xx_idc_lock(ha);
|
|
|
|
qla82xx_clear_qsnt_ready(
|
|
|
|
base_vha);
|
|
|
|
qla82xx_idc_unlock(ha);
|
|
|
|
} else if (IS_QLA8044(ha)) {
|
|
|
|
qla8044_idc_lock(ha);
|
|
|
|
qla8044_clear_qsnt_ready(
|
|
|
|
base_vha);
|
|
|
|
qla8044_idc_unlock(ha);
|
|
|
|
}
|
2012-08-22 18:21:06 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
clear_bit(ISP_QUIESCE_NEEDED,
|
|
|
|
&base_vha->dpc_flags);
|
|
|
|
qla2x00_quiesce_io(base_vha);
|
2010-12-22 00:00:14 +00:00
|
|
|
}
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
|
|
|
|
"Quiescence mode end.\n");
|
2010-12-22 00:00:14 +00:00
|
|
|
}
|
|
|
|
|
2008-11-06 18:40:51 +00:00
|
|
|
if (test_and_clear_bit(RESET_MARKER_NEEDED,
|
2013-03-28 12:21:23 +00:00
|
|
|
&base_vha->dpc_flags) &&
|
2008-11-06 18:40:51 +00:00
|
|
|
(!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
|
|
|
|
"Reset marker scheduled.\n");
|
2008-11-06 18:40:51 +00:00
|
|
|
qla2x00_rst_aen(base_vha);
|
|
|
|
clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg(ql_dbg_dpc, base_vha, 0x400c,
|
|
|
|
"Reset marker end.\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Retry each device up to login retry count */
|
2017-12-04 22:45:06 +00:00
|
|
|
if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) &&
|
2008-11-06 18:40:51 +00:00
|
|
|
!test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
|
|
|
|
atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2017-12-04 22:45:06 +00:00
|
|
|
if (!base_vha->relogin_jif ||
|
|
|
|
time_after_eq(jiffies, base_vha->relogin_jif)) {
|
|
|
|
base_vha->relogin_jif = jiffies + HZ;
|
|
|
|
clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags);
|
|
|
|
|
2017-12-28 20:33:16 +00:00
|
|
|
ql_dbg(ql_dbg_disc, base_vha, 0x400d,
|
2017-12-04 22:45:06 +00:00
|
|
|
"Relogin scheduled.\n");
|
2017-12-28 20:33:16 +00:00
|
|
|
qla24xx_post_relogin_work(base_vha);
|
2017-12-04 22:45:06 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2013-03-28 12:21:23 +00:00
|
|
|
loop_resync_check:
|
2008-11-06 18:40:51 +00:00
|
|
|
if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
|
2013-03-28 12:21:23 +00:00
|
|
|
&base_vha->dpc_flags)) {
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
|
|
|
|
"Loop resync scheduled.\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
|
2008-11-06 18:40:51 +00:00
|
|
|
&base_vha->dpc_flags))) {
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-07-09 14:23:26 +00:00
|
|
|
qla2x00_loop_resync(base_vha);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-11-06 18:40:51 +00:00
|
|
|
clear_bit(LOOP_RESYNC_ACTIVE,
|
|
|
|
&base_vha->dpc_flags);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg(ql_dbg_dpc, base_vha, 0x4010,
|
|
|
|
"Loop resync end.\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2013-03-28 12:21:23 +00:00
|
|
|
if (IS_QLAFX00(ha))
|
|
|
|
goto intr_on_check;
|
|
|
|
|
2008-11-06 18:40:51 +00:00
|
|
|
if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
|
|
|
|
atomic_read(&base_vha->loop_state) == LOOP_READY) {
|
|
|
|
clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
|
|
|
|
qla2xxx_flash_npiv_conf(base_vha);
|
2008-09-12 04:22:50 +00:00
|
|
|
}
|
|
|
|
|
2013-03-28 12:21:23 +00:00
|
|
|
intr_on_check:
|
2005-04-16 22:20:36 +00:00
|
|
|
if (!ha->interrupts_on)
|
2007-07-19 22:06:00 +00:00
|
|
|
ha->isp_ops->enable_intrs(ha);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-11-06 18:40:51 +00:00
|
|
|
if (test_and_clear_bit(BEACON_BLINK_NEEDED,
|
2014-04-11 20:54:40 +00:00
|
|
|
&base_vha->dpc_flags)) {
|
|
|
|
if (ha->beacon_blink_led == 1)
|
|
|
|
ha->isp_ops->beacon_blink(base_vha);
|
|
|
|
}
|
2006-02-01 00:05:07 +00:00
|
|
|
|
2016-12-12 22:40:07 +00:00
|
|
|
/* qpair online check */
|
|
|
|
if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED,
|
|
|
|
&base_vha->dpc_flags)) {
|
|
|
|
if (ha->flags.eeh_busy ||
|
|
|
|
ha->flags.pci_channel_io_perm_failure)
|
|
|
|
online = 0;
|
|
|
|
else
|
|
|
|
online = 1;
|
|
|
|
|
|
|
|
mutex_lock(&ha->mq_lock);
|
|
|
|
list_for_each_entry(qpair, &base_vha->qp_list,
|
|
|
|
qp_list_elem)
|
|
|
|
qpair->online = online;
|
|
|
|
mutex_unlock(&ha->mq_lock);
|
|
|
|
}
|
|
|
|
|
2021-03-29 08:52:21 +00:00
|
|
|
if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED,
|
|
|
|
&base_vha->dpc_flags)) {
|
|
|
|
u16 threshold = ha->nvme_last_rptd_aen + ha->last_zio_threshold;
|
|
|
|
|
|
|
|
if (threshold > ha->orig_fw_xcb_count)
|
|
|
|
threshold = ha->orig_fw_xcb_count;
|
|
|
|
|
2017-07-21 16:32:25 +00:00
|
|
|
ql_log(ql_log_info, base_vha, 0xffffff,
|
2021-03-29 08:52:21 +00:00
|
|
|
"SET ZIO Activity exchange threshold to %d.\n",
|
|
|
|
threshold);
|
|
|
|
if (qla27xx_set_zio_threshold(base_vha, threshold)) {
|
2017-07-21 16:32:25 +00:00
|
|
|
ql_log(ql_log_info, base_vha, 0xffffff,
|
2021-03-29 08:52:21 +00:00
|
|
|
"Unable to SET ZIO Activity exchange threshold to %d.\n",
|
|
|
|
threshold);
|
2017-07-21 16:32:25 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-03-28 12:21:23 +00:00
|
|
|
if (!IS_QLAFX00(ha))
|
|
|
|
qla2x00_do_dpc_all_vps(base_vha);
|
2007-07-05 20:16:51 +00:00
|
|
|
|
2018-08-02 20:16:44 +00:00
|
|
|
if (test_and_clear_bit(N2N_LINK_RESET,
|
|
|
|
&base_vha->dpc_flags)) {
|
|
|
|
qla2x00_lip_reset(base_vha);
|
|
|
|
}
|
|
|
|
|
2021-06-19 05:24:27 +00:00
|
|
|
if (test_bit(HEARTBEAT_CHK, &base_vha->dpc_flags)) {
|
|
|
|
/*
|
|
|
|
* if there is a mb in progress then that's
|
|
|
|
* enough of a check to see if fw is still ticking.
|
|
|
|
*/
|
|
|
|
if (!ha->flags.mbox_busy && base_vha->flags.init_done)
|
|
|
|
qla_no_op_mb(base_vha);
|
|
|
|
|
|
|
|
clear_bit(HEARTBEAT_CHK, &base_vha->dpc_flags);
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
ha->dpc_active = 0;
|
2011-11-18 17:03:10 +00:00
|
|
|
end_loop:
|
2011-01-27 21:12:37 +00:00
|
|
|
set_current_state(TASK_INTERRUPTIBLE);
|
2005-04-16 22:20:36 +00:00
|
|
|
} /* End of while(1) */
|
2011-01-27 21:12:37 +00:00
|
|
|
__set_current_state(TASK_RUNNING);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg(ql_dbg_dpc, base_vha, 0x4011,
|
|
|
|
"DPC handler exiting.\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Make sure that nobody tries to wake us up again.
|
|
|
|
*/
|
|
|
|
ha->dpc_active = 0;
|
|
|
|
|
2009-08-20 18:06:05 +00:00
|
|
|
/* Cleanup any residual CTX SRBs. */
|
|
|
|
qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
|
|
|
|
|
2006-02-14 17:46:22 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2008-11-06 18:40:51 +00:00
|
|
|
qla2xxx_wake_dpc(struct scsi_qla_host *vha)
|
2006-02-14 17:46:22 +00:00
|
|
|
{
|
2008-11-06 18:40:51 +00:00
|
|
|
struct qla_hw_data *ha = vha->hw;
|
2008-08-14 04:37:01 +00:00
|
|
|
struct task_struct *t = ha->dpc_thread;
|
|
|
|
|
2008-11-06 18:40:51 +00:00
|
|
|
if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
|
2008-08-14 04:37:01 +00:00
|
|
|
wake_up_process(t);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
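Usage sketch for the wake helper above: the rest of the driver hands work to the DPC thread by setting a bit in vha->dpc_flags and then calling qla2xxx_wake_dpc(); the DPC loop tests-and-clears the bit when it runs. The wrapper name below is hypothetical, but the pattern matches, for example, the loop-down handling in qla2x00_timer().

static void example_schedule_isp_abort(scsi_qla_host_t *vha)
{
	/* Publish the request... */
	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	/* ...then kick the DPC thread (no-op once UNLOADING is set). */
	qla2xxx_wake_dpc(vha);
}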
|
|
|
|
|
|
|
|
/*
|
|
|
|
* qla2x00_rst_aen
|
|
|
|
* Processes asynchronous reset.
|
|
|
|
*
|
|
|
|
* Input:
|
|
|
|
* ha = adapter block pointer.
|
|
|
|
*/
|
|
|
|
static void
|
2008-11-06 18:40:51 +00:00
|
|
|
qla2x00_rst_aen(scsi_qla_host_t *vha)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2008-11-06 18:40:51 +00:00
|
|
|
if (vha->flags.online && !vha->flags.reset_active &&
|
|
|
|
!atomic_read(&vha->loop_down_timer) &&
|
|
|
|
!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
|
2005-04-16 22:20:36 +00:00
|
|
|
do {
|
2008-11-06 18:40:51 +00:00
|
|
|
clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Issue marker command only when we are going to start
|
|
|
|
* the I/O.
|
|
|
|
*/
|
2008-11-06 18:40:51 +00:00
|
|
|
vha->marker_needed = 1;
|
|
|
|
} while (!atomic_read(&vha->loop_down_timer) &&
|
|
|
|
(test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-19 05:24:27 +00:00
|
|
|
static bool qla_do_heartbeat(struct scsi_qla_host *vha)
|
|
|
|
{
|
|
|
|
u64 cmd_cnt, prev_cmd_cnt;
|
|
|
|
bool do_hb = false;
|
|
|
|
struct qla_hw_data *ha = vha->hw;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* if cmds are still pending down in fw, then do hb */
|
|
|
|
if (ha->base_qpair->cmd_cnt != ha->base_qpair->cmd_completion_cnt) {
|
|
|
|
do_hb = true;
|
|
|
|
goto skip;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < ha->max_qpairs; i++) {
|
|
|
|
if (ha->queue_pair_map[i] &&
|
|
|
|
ha->queue_pair_map[i]->cmd_cnt !=
|
|
|
|
ha->queue_pair_map[i]->cmd_completion_cnt) {
|
|
|
|
do_hb = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
skip:
|
|
|
|
prev_cmd_cnt = ha->prev_cmd_cnt;
|
|
|
|
cmd_cnt = ha->base_qpair->cmd_cnt;
|
|
|
|
for (i = 0; i < ha->max_qpairs; i++) {
|
|
|
|
if (ha->queue_pair_map[i])
|
|
|
|
cmd_cnt += ha->queue_pair_map[i]->cmd_cnt;
|
|
|
|
}
|
|
|
|
ha->prev_cmd_cnt = cmd_cnt;
|
|
|
|
|
|
|
|
if (!do_hb && ((cmd_cnt - prev_cmd_cnt) > 50))
|
|
|
|
/*
|
|
|
|
* IOs are completing before periodic hb check.
|
|
|
|
 * IOs seem to be running, do hb for sanity check.
|
|
|
|
*/
|
|
|
|
do_hb = true;
|
|
|
|
|
|
|
|
return do_hb;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void qla_heart_beat(struct scsi_qla_host *vha)
|
|
|
|
{
|
|
|
|
if (vha->vp_idx)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (vha->hw->flags.eeh_busy || qla2x00_chip_is_down(vha))
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (qla_do_heartbeat(vha)) {
|
|
|
|
set_bit(HEARTBEAT_CHK, &vha->dpc_flags);
|
|
|
|
qla2xxx_wake_dpc(vha);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/**************************************************************************
|
|
|
|
* qla2x00_timer
|
|
|
|
*
|
|
|
|
* Description:
|
|
|
|
* One second timer
|
|
|
|
*
|
|
|
|
* Context: Interrupt
|
|
|
|
***************************************************************************/
|
2007-07-05 20:16:51 +00:00
|
|
|
void
|
2017-09-03 20:23:32 +00:00
|
|
|
qla2x00_timer(struct timer_list *t)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2017-09-03 20:23:32 +00:00
|
|
|
scsi_qla_host_t *vha = from_timer(vha, t, timer);
|
2005-04-16 22:20:36 +00:00
|
|
|
unsigned long cpu_flags = 0;
|
|
|
|
int start_dpc = 0;
|
|
|
|
int index;
|
|
|
|
srb_t *sp;
|
2009-12-16 05:29:46 +00:00
|
|
|
uint16_t w;
|
2008-11-06 18:40:51 +00:00
|
|
|
struct qla_hw_data *ha = vha->hw;
|
2008-12-10 00:45:39 +00:00
|
|
|
struct req_que *req;
|
2021-01-11 09:31:28 +00:00
|
|
|
unsigned long flags;
|
|
|
|
fc_port_t *fcport = NULL;
|
2009-12-16 05:29:46 +00:00
|
|
|
|
2010-09-03 22:20:50 +00:00
|
|
|
if (ha->flags.eeh_busy) {
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg(ql_dbg_timer, vha, 0x6000,
|
|
|
|
"EEH = %d, restarting timer.\n",
|
|
|
|
ha->flags.eeh_busy);
|
2010-09-03 22:20:50 +00:00
|
|
|
qla2x00_restart_timer(vha, WATCH_INTERVAL);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2013-10-30 07:38:16 +00:00
|
|
|
/*
|
|
|
|
* Hardware read to raise pending EEH errors during mailbox waits. If
|
|
|
|
* the read returns -1 then disable the board.
|
|
|
|
*/
|
|
|
|
if (!pci_channel_offline(ha->pdev)) {
|
2009-12-16 05:29:46 +00:00
|
|
|
pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
|
2014-08-26 21:11:41 +00:00
|
|
|
qla2x00_check_reg16_for_disconnect(vha, w);
|
2013-10-30 07:38:16 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-05-10 18:18:18 +00:00
|
|
|
/* Make sure qla82xx_watchdog is run only for physical port */
|
2013-08-27 05:37:28 +00:00
|
|
|
if (!vha->vp_idx && IS_P3P_TYPE(ha)) {
|
2010-12-22 00:00:14 +00:00
|
|
|
if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags))
|
|
|
|
start_dpc++;
|
2013-08-27 05:37:28 +00:00
|
|
|
if (IS_QLA82XX(ha))
|
|
|
|
qla82xx_watchdog(vha);
|
|
|
|
else if (IS_QLA8044(ha))
|
|
|
|
qla8044_watchdog(vha);
|
2010-12-22 00:00:14 +00:00
|
|
|
}
|
|
|
|
|
2013-03-28 12:21:23 +00:00
|
|
|
if (!vha->vp_idx && IS_QLAFX00(ha))
|
|
|
|
qlafx00_timer_routine(vha);
|
|
|
|
|
2021-01-11 09:31:28 +00:00
|
|
|
if (vha->link_down_time < QLA2XX_MAX_LINK_DOWN_TIME)
|
|
|
|
vha->link_down_time++;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
|
|
|
|
list_for_each_entry(fcport, &vha->vp_fcports, list) {
|
|
|
|
if (fcport->tgt_link_down_time < QLA2XX_MAX_LINK_DOWN_TIME)
|
|
|
|
fcport->tgt_link_down_time++;
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/* Loop down handler. */
|
2008-11-06 18:40:51 +00:00
|
|
|
if (atomic_read(&vha->loop_down_timer) > 0 &&
|
2011-03-30 18:46:26 +00:00
|
|
|
!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
|
|
|
|
!(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))
|
2008-11-06 18:40:51 +00:00
|
|
|
&& vha->flags.online) {
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-11-06 18:40:51 +00:00
|
|
|
if (atomic_read(&vha->loop_down_timer) ==
|
|
|
|
vha->loop_down_abort_time) {
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_log(ql_log_info, vha, 0x6008,
|
|
|
|
"Loop down - aborting the queues before time expires.\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-11-06 18:40:51 +00:00
|
|
|
if (!IS_QLA2100(ha) && vha->link_down_timeout)
|
|
|
|
atomic_set(&vha->loop_state, LOOP_DEAD);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2010-01-12 20:59:48 +00:00
|
|
|
/*
|
|
|
|
* Schedule an ISP abort to return any FCP2-device
|
|
|
|
* commands.
|
|
|
|
*/
|
2007-07-05 20:16:51 +00:00
|
|
|
/* NPIV - scan physical port only */
|
2008-11-06 18:40:51 +00:00
|
|
|
if (!vha->vp_idx) {
|
2007-07-05 20:16:51 +00:00
|
|
|
spin_lock_irqsave(&ha->hardware_lock,
|
|
|
|
cpu_flags);
|
2008-12-10 00:45:39 +00:00
|
|
|
req = ha->req_q_map[0];
|
2007-07-05 20:16:51 +00:00
|
|
|
for (index = 1;
|
2013-01-30 08:34:37 +00:00
|
|
|
index < req->num_outstanding_cmds;
|
2007-07-05 20:16:51 +00:00
|
|
|
index++) {
|
|
|
|
fc_port_t *sfcp;
|
|
|
|
|
2008-11-06 18:40:51 +00:00
|
|
|
sp = req->outstanding_cmds[index];
|
2007-07-05 20:16:51 +00:00
|
|
|
if (!sp)
|
|
|
|
continue;
|
2017-06-14 03:47:16 +00:00
|
|
|
if (sp->cmd_type != TYPE_SRB)
|
|
|
|
continue;
|
2012-02-09 19:15:36 +00:00
|
|
|
if (sp->type != SRB_SCSI_CMD)
|
2009-08-20 18:06:04 +00:00
|
|
|
continue;
|
2007-07-05 20:16:51 +00:00
|
|
|
sfcp = sp->fcport;
|
2010-01-12 20:59:48 +00:00
|
|
|
if (!(sfcp->flags & FCF_FCP2_DEVICE))
|
2007-07-05 20:16:51 +00:00
|
|
|
continue;
|
2005-04-17 20:06:53 +00:00
|
|
|
|
2011-03-30 18:46:26 +00:00
|
|
|
if (IS_QLA82XX(ha))
|
|
|
|
set_bit(FCOE_CTX_RESET_NEEDED,
|
|
|
|
&vha->dpc_flags);
|
|
|
|
else
|
|
|
|
set_bit(ISP_ABORT_NEEDED,
|
2008-11-06 18:40:51 +00:00
|
|
|
&vha->dpc_flags);
|
2007-07-05 20:16:51 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&ha->hardware_lock,
|
2008-11-06 18:40:51 +00:00
|
|
|
cpu_flags);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
start_dpc++;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* if the loop has been down for 4 minutes, reinit adapter */
|
2008-11-06 18:40:51 +00:00
|
|
|
if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
|
2009-08-25 18:36:19 +00:00
|
|
|
if (!(vha->device_flags & DFLG_NO_CABLE)) {
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_log(ql_log_warn, vha, 0x6009,
|
2005-04-16 22:20:36 +00:00
|
|
|
"Loop down - aborting ISP.\n");
|
|
|
|
|
2011-03-30 18:46:26 +00:00
|
|
|
if (IS_QLA82XX(ha))
|
|
|
|
set_bit(FCOE_CTX_RESET_NEEDED,
|
|
|
|
&vha->dpc_flags);
|
|
|
|
else
|
|
|
|
set_bit(ISP_ABORT_NEEDED,
|
|
|
|
&vha->dpc_flags);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
}
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg(ql_dbg_timer, vha, 0x600a,
|
|
|
|
"Loop down - seconds remaining %d.\n",
|
|
|
|
atomic_read(&vha->loop_down_timer));
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2011-05-10 18:18:18 +00:00
|
|
|
/* Check if beacon LED needs to be blinked for physical host only */
|
|
|
|
if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
|
2011-08-16 18:31:45 +00:00
|
|
|
/* There is no beacon_blink function for ISP82xx */
|
2013-08-27 05:37:28 +00:00
|
|
|
if (!IS_P3P_TYPE(ha)) {
|
2011-08-16 18:31:45 +00:00
|
|
|
set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
|
|
|
|
start_dpc++;
|
|
|
|
}
|
2006-02-01 00:05:07 +00:00
|
|
|
}
|
|
|
|
|
2021-08-17 05:13:08 +00:00
|
|
|
/* check if edif running */
|
|
|
|
if (vha->hw->flags.edif_enabled)
|
|
|
|
qla_edif_timer(vha);
|
|
|
|
|
2008-04-24 22:21:23 +00:00
|
|
|
/* Process any deferred work. */
|
2017-12-28 20:33:16 +00:00
|
|
|
if (!list_empty(&vha->work_list)) {
|
|
|
|
unsigned long flags;
|
|
|
|
bool q = false;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&vha->work_lock, flags);
|
|
|
|
if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
|
|
|
|
q = true;
|
|
|
|
spin_unlock_irqrestore(&vha->work_lock, flags);
|
|
|
|
if (q)
|
|
|
|
queue_work(vha->hw->wq, &vha->iocb_work);
|
|
|
|
}
|
2008-04-24 22:21:23 +00:00
|
|
|
|
2017-06-21 20:48:42 +00:00
|
|
|
/*
|
|
|
|
* FC-NVME
|
|
|
|
* see if the active AEN count has changed from what was last reported.
|
|
|
|
*/
|
2020-09-04 04:51:22 +00:00
|
|
|
index = atomic_read(&ha->nvme_active_aen_cnt);
|
2019-04-02 21:24:32 +00:00
|
|
|
if (!vha->vp_idx &&
|
2020-09-04 04:51:22 +00:00
|
|
|
(index != ha->nvme_last_rptd_aen) &&
|
2019-04-02 21:24:32 +00:00
|
|
|
ha->zio_mode == QLA_ZIO_MODE_6 &&
|
|
|
|
!ha->flags.host_shutting_down) {
|
2021-03-29 08:52:21 +00:00
|
|
|
ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
|
2017-06-21 20:48:42 +00:00
|
|
|
ql_log(ql_log_info, vha, 0x3002,
|
2018-09-04 21:19:14 +00:00
|
|
|
"nvme: Sched: Set ZIO exchange threshold to %d.\n",
|
|
|
|
ha->nvme_last_rptd_aen);
|
2021-03-29 08:52:21 +00:00
|
|
|
set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
|
2018-09-04 21:19:14 +00:00
|
|
|
start_dpc++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vha->vp_idx &&
|
2020-09-04 04:51:22 +00:00
|
|
|
atomic_read(&ha->zio_threshold) != ha->last_zio_threshold &&
|
|
|
|
IS_ZIO_THRESHOLD_CAPABLE(ha)) {
|
2018-09-04 21:19:14 +00:00
|
|
|
ql_log(ql_log_info, vha, 0x3002,
|
|
|
|
"Sched: Set ZIO exchange threshold to %d.\n",
|
|
|
|
ha->last_zio_threshold);
|
|
|
|
ha->last_zio_threshold = atomic_read(&ha->zio_threshold);
|
2017-07-21 16:32:25 +00:00
|
|
|
set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
|
|
|
|
start_dpc++;
|
2017-06-21 20:48:42 +00:00
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/* Schedule the DPC routine if needed */
|
2008-11-06 18:40:51 +00:00
|
|
|
if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
|
|
|
|
test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
|
|
|
|
test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
|
2005-04-16 22:20:36 +00:00
|
|
|
start_dpc ||
|
2008-11-06 18:40:51 +00:00
|
|
|
test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
|
|
|
|
test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
|
2010-04-13 00:59:55 +00:00
|
|
|
test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
|
|
|
|
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
|
2008-11-06 18:40:51 +00:00
|
|
|
test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
|
2020-02-12 21:44:18 +00:00
|
|
|
test_bit(RELOGIN_NEEDED, &vha->dpc_flags) ||
|
|
|
|
test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags))) {
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg(ql_dbg_timer, vha, 0x600b,
|
|
|
|
"isp_abort_needed=%d loop_resync_needed=%d "
|
|
|
|
"fcport_update_needed=%d start_dpc=%d "
|
|
|
|
"reset_marker_needed=%d",
|
|
|
|
test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags),
|
|
|
|
test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags),
|
|
|
|
test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags),
|
|
|
|
start_dpc,
|
|
|
|
test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
|
|
|
|
ql_dbg(ql_dbg_timer, vha, 0x600c,
|
|
|
|
"beacon_blink_needed=%d isp_unrecoverable=%d "
|
|
|
|
"fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
|
2020-02-12 21:44:18 +00:00
|
|
|
"relogin_needed=%d, Process_purex_iocb=%d.\n",
|
2011-07-14 19:00:13 +00:00
|
|
|
test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
|
|
|
|
test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
|
|
|
|
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
|
|
|
|
test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
|
2020-02-12 21:44:18 +00:00
|
|
|
test_bit(RELOGIN_NEEDED, &vha->dpc_flags),
|
|
|
|
test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags));
|
2008-11-06 18:40:51 +00:00
|
|
|
qla2xxx_wake_dpc(vha);
|
2011-07-14 19:00:13 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2021-06-19 05:24:27 +00:00
|
|
|
qla_heart_beat(vha);
|
|
|
|
|
2008-11-06 18:40:51 +00:00
|
|
|
qla2x00_restart_timer(vha, WATCH_INTERVAL);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
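A minimal sketch of how a one-second timer like the one above is typically armed: from_timer() in qla2x00_timer() implies that scsi_qla_host_t embeds a struct timer_list member named timer. The helper below is illustrative only and is not the driver's actual qla2x00_start_timer()/qla2x00_restart_timer() implementation (bookkeeping such as timer_active is omitted).

static void example_arm_host_timer(scsi_qla_host_t *vha, unsigned long interval)
{
	/* Bind the callback once, then (re)arm for "interval" seconds. */
	timer_setup(&vha->timer, qla2x00_timer, 0);
	mod_timer(&vha->timer, jiffies + interval * HZ);
}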
|
|
|
|
|
2005-11-09 23:49:04 +00:00
|
|
|
/* Firmware interface routines. */
|
|
|
|
|
|
|
|
#define FW_ISP21XX 0
|
|
|
|
#define FW_ISP22XX 1
|
|
|
|
#define FW_ISP2300 2
|
|
|
|
#define FW_ISP2322 3
|
2006-03-09 22:27:18 +00:00
|
|
|
#define FW_ISP24XX 4
|
2007-07-20 03:37:34 +00:00
|
|
|
#define FW_ISP25XX 5
|
2009-01-05 19:18:11 +00:00
|
|
|
#define FW_ISP81XX 6
|
2010-04-13 00:59:55 +00:00
|
|
|
#define FW_ISP82XX 7
|
2012-02-09 19:15:34 +00:00
|
|
|
#define FW_ISP2031 8
|
|
|
|
#define FW_ISP8031 9
|
2014-04-11 20:54:13 +00:00
|
|
|
#define FW_ISP27XX 10
|
2019-03-12 18:08:13 +00:00
|
|
|
#define FW_ISP28XX 11
|
2005-11-09 23:49:04 +00:00
|
|
|
|
2006-10-02 19:00:48 +00:00
|
|
|
#define FW_FILE_ISP21XX "ql2100_fw.bin"
|
|
|
|
#define FW_FILE_ISP22XX "ql2200_fw.bin"
|
|
|
|
#define FW_FILE_ISP2300 "ql2300_fw.bin"
|
|
|
|
#define FW_FILE_ISP2322 "ql2322_fw.bin"
|
|
|
|
#define FW_FILE_ISP24XX "ql2400_fw.bin"
|
2007-07-20 03:37:34 +00:00
|
|
|
#define FW_FILE_ISP25XX "ql2500_fw.bin"
|
2009-01-05 19:18:11 +00:00
|
|
|
#define FW_FILE_ISP81XX "ql8100_fw.bin"
|
2010-04-13 00:59:55 +00:00
|
|
|
#define FW_FILE_ISP82XX "ql8200_fw.bin"
|
2012-02-09 19:15:34 +00:00
|
|
|
#define FW_FILE_ISP2031 "ql2600_fw.bin"
|
|
|
|
#define FW_FILE_ISP8031 "ql8300_fw.bin"
|
2014-04-11 20:54:13 +00:00
|
|
|
#define FW_FILE_ISP27XX "ql2700_fw.bin"
|
2019-03-12 18:08:13 +00:00
|
|
|
#define FW_FILE_ISP28XX "ql2800_fw.bin"
|
2014-02-26 09:15:06 +00:00
|
|
|
|
2006-10-02 19:00:48 +00:00
|
|
|
|
2008-05-13 05:21:10 +00:00
|
|
|
static DEFINE_MUTEX(qla_fw_lock);
|
2005-11-09 23:49:04 +00:00
|
|
|
|
2019-03-12 18:08:13 +00:00
|
|
|
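/* Firmware images, indexed by the FW_ISP* constants above. */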
static struct fw_blob qla_fw_blobs[] = {
|
2006-10-02 19:00:48 +00:00
|
|
|
{ .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
|
|
|
|
{ .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
|
|
|
|
{ .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
|
|
|
|
{ .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
|
|
|
|
{ .name = FW_FILE_ISP24XX, },
|
2007-07-20 03:37:34 +00:00
|
|
|
{ .name = FW_FILE_ISP25XX, },
|
2009-01-05 19:18:11 +00:00
|
|
|
{ .name = FW_FILE_ISP81XX, },
|
2010-04-13 00:59:55 +00:00
|
|
|
{ .name = FW_FILE_ISP82XX, },
|
2012-02-09 19:15:34 +00:00
|
|
|
{ .name = FW_FILE_ISP2031, },
|
|
|
|
{ .name = FW_FILE_ISP8031, },
|
2014-04-11 20:54:13 +00:00
|
|
|
{ .name = FW_FILE_ISP27XX, },
|
2019-03-12 18:08:13 +00:00
|
|
|
{ .name = FW_FILE_ISP28XX, },
|
|
|
|
{ .name = NULL, },
|
2005-11-09 23:49:04 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
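/*
 * Return the firmware blob that matches the ISP type. The image is
 * loaded via request_firmware() on first use and cached under
 * qla_fw_lock for subsequent callers.
 */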
struct fw_blob *
|
2008-11-06 18:40:51 +00:00
|
|
|
qla2x00_request_firmware(scsi_qla_host_t *vha)
|
2005-11-09 23:49:04 +00:00
|
|
|
{
|
2008-11-06 18:40:51 +00:00
|
|
|
struct qla_hw_data *ha = vha->hw;
|
2005-11-09 23:49:04 +00:00
|
|
|
struct fw_blob *blob;
|
|
|
|
|
|
|
|
if (IS_QLA2100(ha)) {
|
|
|
|
blob = &qla_fw_blobs[FW_ISP21XX];
|
|
|
|
} else if (IS_QLA2200(ha)) {
|
|
|
|
blob = &qla_fw_blobs[FW_ISP22XX];
|
2006-03-09 22:27:18 +00:00
|
|
|
} else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
|
2005-11-09 23:49:04 +00:00
|
|
|
blob = &qla_fw_blobs[FW_ISP2300];
|
2006-03-09 22:27:18 +00:00
|
|
|
} else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
|
2005-11-09 23:49:04 +00:00
|
|
|
blob = &qla_fw_blobs[FW_ISP2322];
|
2008-04-03 20:13:26 +00:00
|
|
|
} else if (IS_QLA24XX_TYPE(ha)) {
|
2005-11-09 23:49:04 +00:00
|
|
|
blob = &qla_fw_blobs[FW_ISP24XX];
|
2007-07-20 03:37:34 +00:00
|
|
|
} else if (IS_QLA25XX(ha)) {
|
|
|
|
blob = &qla_fw_blobs[FW_ISP25XX];
|
2009-01-05 19:18:11 +00:00
|
|
|
} else if (IS_QLA81XX(ha)) {
|
|
|
|
blob = &qla_fw_blobs[FW_ISP81XX];
|
2010-04-13 00:59:55 +00:00
|
|
|
} else if (IS_QLA82XX(ha)) {
|
|
|
|
blob = &qla_fw_blobs[FW_ISP82XX];
|
2012-02-09 19:15:34 +00:00
|
|
|
} else if (IS_QLA2031(ha)) {
|
|
|
|
blob = &qla_fw_blobs[FW_ISP2031];
|
|
|
|
} else if (IS_QLA8031(ha)) {
|
|
|
|
blob = &qla_fw_blobs[FW_ISP8031];
|
2014-04-11 20:54:13 +00:00
|
|
|
} else if (IS_QLA27XX(ha)) {
|
|
|
|
blob = &qla_fw_blobs[FW_ISP27XX];
|
2019-03-12 18:08:13 +00:00
|
|
|
} else if (IS_QLA28XX(ha)) {
|
|
|
|
blob = &qla_fw_blobs[FW_ISP28XX];
|
2012-02-21 07:29:40 +00:00
|
|
|
} else {
|
|
|
|
return NULL;
|
2005-11-09 23:49:04 +00:00
|
|
|
}
|
|
|
|
|
2019-03-12 18:08:13 +00:00
|
|
|
if (!blob->name)
|
|
|
|
return NULL;
|
|
|
|
|
2008-05-13 05:21:10 +00:00
|
|
|
mutex_lock(&qla_fw_lock);
|
2005-11-09 23:49:04 +00:00
|
|
|
if (blob->fw)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_log(ql_log_warn, vha, 0x0063,
|
|
|
|
"Failed to load firmware image (%s).\n", blob->name);
|
2005-11-09 23:49:04 +00:00
|
|
|
blob->fw = NULL;
|
|
|
|
blob = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
2008-05-13 05:21:10 +00:00
|
|
|
mutex_unlock(&qla_fw_lock);
|
2005-11-09 23:49:04 +00:00
|
|
|
return blob;
|
|
|
|
}
|
|
|
|
|
|
|
|
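/* Release every cached firmware image; called from qla2x00_module_exit(). */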
static void
|
|
|
|
qla2x00_release_firmware(void)
|
|
|
|
{
|
2019-03-12 18:08:13 +00:00
|
|
|
struct fw_blob *blob;
|
2005-11-09 23:49:04 +00:00
|
|
|
|
2008-05-13 05:21:10 +00:00
|
|
|
mutex_lock(&qla_fw_lock);
|
2019-03-12 18:08:13 +00:00
|
|
|
for (blob = qla_fw_blobs; blob->name; blob++)
|
|
|
|
release_firmware(blob->fw);
|
2008-05-13 05:21:10 +00:00
|
|
|
mutex_unlock(&qla_fw_lock);
|
2005-11-09 23:49:04 +00:00
|
|
|
}
|
|
|
|
|
2019-05-06 20:52:19 +00:00
|
|
|
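/*
 * Quiesce the adapter after a PCI error: bump the chip reset count,
 * take all queue pairs offline, mark remote ports lost and clear any
 * pending async login state on the base host and every vport.
 */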
static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
|
|
|
|
{
|
|
|
|
struct qla_hw_data *ha = vha->hw;
|
|
|
|
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
|
|
|
|
struct qla_qpair *qpair = NULL;
|
2021-08-10 04:37:14 +00:00
|
|
|
struct scsi_qla_host *vp, *tvp;
|
2019-05-06 20:52:19 +00:00
|
|
|
fc_port_t *fcport;
|
|
|
|
int i;
|
|
|
|
unsigned long flags;
|
|
|
|
|
2021-03-29 08:52:25 +00:00
|
|
|
ql_dbg(ql_dbg_aer, vha, 0x9000,
|
|
|
|
"%s\n", __func__);
|
2019-05-06 20:52:19 +00:00
|
|
|
ha->chip_reset++;
|
|
|
|
|
|
|
|
ha->base_qpair->chip_reset = ha->chip_reset;
|
|
|
|
for (i = 0; i < ha->max_qpairs; i++) {
|
|
|
|
if (ha->queue_pair_map[i])
|
|
|
|
ha->queue_pair_map[i]->chip_reset =
|
|
|
|
ha->base_qpair->chip_reset;
|
|
|
|
}
|
|
|
|
|
2021-03-29 08:52:25 +00:00
|
|
|
/*
|
|
|
|
* Purging the mailbox might take a while; slot reset/chip reset
|
|
|
|
* will take care of the purge.
|
|
|
|
*/
|
2019-05-06 20:52:19 +00:00
|
|
|
|
|
|
|
mutex_lock(&ha->mq_lock);
|
2021-03-29 08:52:25 +00:00
|
|
|
ha->base_qpair->online = 0;
|
2019-05-06 20:52:19 +00:00
|
|
|
list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
|
|
|
|
qpair->online = 0;
|
2021-03-29 08:52:25 +00:00
|
|
|
wmb();
|
2019-05-06 20:52:19 +00:00
|
|
|
mutex_unlock(&ha->mq_lock);
|
|
|
|
|
2019-12-17 22:06:04 +00:00
|
|
|
qla2x00_mark_all_devices_lost(vha);
|
2019-05-06 20:52:19 +00:00
|
|
|
|
|
|
|
spin_lock_irqsave(&ha->vport_slock, flags);
|
2021-08-10 04:37:14 +00:00
|
|
|
list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
|
2019-05-06 20:52:19 +00:00
|
|
|
atomic_inc(&vp->vref_count);
|
|
|
|
spin_unlock_irqrestore(&ha->vport_slock, flags);
|
2019-12-17 22:06:04 +00:00
|
|
|
qla2x00_mark_all_devices_lost(vp);
|
2019-05-06 20:52:19 +00:00
|
|
|
spin_lock_irqsave(&ha->vport_slock, flags);
|
|
|
|
atomic_dec(&vp->vref_count);
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&ha->vport_slock, flags);
|
|
|
|
|
|
|
|
/* Clear all async request states across all VPs. */
|
|
|
|
list_for_each_entry(fcport, &vha->vp_fcports, list)
|
|
|
|
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
|
|
|
|
|
|
|
|
spin_lock_irqsave(&ha->vport_slock, flags);
|
2021-08-10 04:37:14 +00:00
|
|
|
list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
|
2019-05-06 20:52:19 +00:00
|
|
|
atomic_inc(&vp->vref_count);
|
|
|
|
spin_unlock_irqrestore(&ha->vport_slock, flags);
|
|
|
|
list_for_each_entry(fcport, &vp->vp_fcports, list)
|
|
|
|
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
|
|
|
|
spin_lock_irqsave(&ha->vport_slock, flags);
|
|
|
|
atomic_dec(&vp->vref_count);
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&ha->vport_slock, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-09-20 21:07:36 +00:00
|
|
|
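/*
 * PCI AER callback: a PCI channel error was reported for this device.
 * Decide whether the driver can recover in place, needs a slot reset,
 * or must be disconnected.
 */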
static pci_ers_result_t
|
|
|
|
qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
|
|
|
|
{
|
2009-12-16 05:29:46 +00:00
|
|
|
scsi_qla_host_t *vha = pci_get_drvdata(pdev);
|
|
|
|
struct qla_hw_data *ha = vha->hw;
|
2021-03-29 08:52:25 +00:00
|
|
|
pci_ers_result_t ret = PCI_ERS_RESULT_NEED_RESET;
|
2009-12-16 05:29:46 +00:00
|
|
|
|
2021-03-29 08:52:25 +00:00
|
|
|
ql_log(ql_log_warn, vha, 0x9000,
|
|
|
|
"PCI error detected, state %x.\n", state);
|
|
|
|
ha->pci_error_state = QLA_PCI_ERR_DETECTED;
|
2009-03-24 16:08:18 +00:00
|
|
|
|
2017-08-23 22:05:00 +00:00
|
|
|
if (!atomic_read(&pdev->enable_cnt)) {
|
|
|
|
ql_log(ql_log_info, vha, 0xffff,
|
|
|
|
"PCI device is disabled,state %x\n", state);
|
2021-03-29 08:52:25 +00:00
|
|
|
ret = PCI_ERS_RESULT_NEED_RESET;
|
|
|
|
goto out;
|
2017-08-23 22:05:00 +00:00
|
|
|
}
|
|
|
|
|
2007-09-20 21:07:36 +00:00
|
|
|
switch (state) {
|
|
|
|
case pci_channel_io_normal:
|
2009-12-16 05:29:46 +00:00
|
|
|
ha->flags.eeh_busy = 0;
|
2017-10-13 22:43:22 +00:00
|
|
|
if (ql2xmqsupport || ql2xnvmeenable) {
|
2016-12-12 22:40:07 +00:00
|
|
|
set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
|
|
|
|
qla2xxx_wake_dpc(vha);
|
|
|
|
}
|
2021-03-29 08:52:25 +00:00
|
|
|
ret = PCI_ERS_RESULT_CAN_RECOVER;
|
|
|
|
break;
|
2007-09-20 21:07:36 +00:00
|
|
|
case pci_channel_io_frozen:
|
2021-03-29 08:52:25 +00:00
|
|
|
qla_pci_set_eeh_busy(vha);
|
|
|
|
ret = PCI_ERS_RESULT_NEED_RESET;
|
|
|
|
break;
|
2007-09-20 21:07:36 +00:00
|
|
|
case pci_channel_io_perm_failure:
|
2009-12-16 05:29:46 +00:00
|
|
|
ha->flags.pci_channel_io_perm_failure = 1;
|
|
|
|
qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
|
2017-10-13 22:43:22 +00:00
|
|
|
if (ql2xmqsupport || ql2xnvmeenable) {
|
2016-12-12 22:40:07 +00:00
|
|
|
set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
|
|
|
|
qla2xxx_wake_dpc(vha);
|
|
|
|
}
|
2021-03-29 08:52:25 +00:00
|
|
|
ret = PCI_ERS_RESULT_DISCONNECT;
|
2007-09-20 21:07:36 +00:00
|
|
|
}
|
2021-03-29 08:52:25 +00:00
|
|
|
out:
|
|
|
|
ql_dbg(ql_dbg_aer, vha, 0x600d,
|
|
|
|
"PCI error detected returning [%x].\n", ret);
|
|
|
|
return ret;
|
2007-09-20 21:07:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
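/*
 * PCI AER callback: MMIO access has been re-enabled. Check whether the
 * RISC is paused and capture a firmware dump before asking for a slot
 * reset.
 */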
static pci_ers_result_t
|
|
|
|
qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
|
|
|
|
{
|
|
|
|
int risc_paused = 0;
|
|
|
|
uint32_t stat;
|
|
|
|
unsigned long flags;
|
2008-11-06 18:40:51 +00:00
|
|
|
scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
|
|
|
|
struct qla_hw_data *ha = base_vha->hw;
|
2007-09-20 21:07:36 +00:00
|
|
|
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
|
|
|
|
struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
|
|
|
|
|
2021-03-29 08:52:25 +00:00
|
|
|
ql_log(ql_log_warn, base_vha, 0x9000,
|
|
|
|
"mmio enabled\n");
|
|
|
|
|
|
|
|
ha->pci_error_state = QLA_PCI_MMIO_ENABLED;
|
2010-09-03 22:20:57 +00:00
|
|
|
if (IS_QLA82XX(ha))
|
|
|
|
return PCI_ERS_RESULT_RECOVERED;
|
|
|
|
|
2007-09-20 21:07:36 +00:00
|
|
|
spin_lock_irqsave(&ha->hardware_lock, flags);
|
|
|
|
if (IS_QLA2100(ha) || IS_QLA2200(ha)){
|
2020-05-18 21:17:08 +00:00
|
|
|
stat = rd_reg_word(®->hccr);
|
2007-09-20 21:07:36 +00:00
|
|
|
if (stat & HCCR_RISC_PAUSE)
|
|
|
|
risc_paused = 1;
|
|
|
|
} else if (IS_QLA23XX(ha)) {
|
2020-05-18 21:17:08 +00:00
|
|
|
stat = rd_reg_dword(®->u.isp2300.host_status);
|
2007-09-20 21:07:36 +00:00
|
|
|
if (stat & HSR_RISC_PAUSED)
|
|
|
|
risc_paused = 1;
|
|
|
|
} else if (IS_FWI2_CAPABLE(ha)) {
|
2020-05-18 21:17:08 +00:00
|
|
|
stat = rd_reg_dword(®24->host_status);
|
2007-09-20 21:07:36 +00:00
|
|
|
if (stat & HSRX_RISC_PAUSED)
|
|
|
|
risc_paused = 1;
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
|
|
|
|
|
|
|
if (risc_paused) {
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_log(ql_log_info, base_vha, 0x9003,
|
|
|
|
"RISC paused -- mmio_enabled, Dumping firmware.\n");
|
2020-05-18 21:17:00 +00:00
|
|
|
qla2xxx_dump_fw(base_vha);
|
2021-03-29 08:52:25 +00:00
|
|
|
}
|
|
|
|
/* Set PCI_ERS_RESULT_NEED_RESET to trigger a call to qla2xxx_pci_slot_reset(). */
|
|
|
|
ql_dbg(ql_dbg_aer, base_vha, 0x600d,
|
|
|
|
"mmio enabled returning.\n");
|
|
|
|
return PCI_ERS_RESULT_NEED_RESET;
|
2007-09-20 21:07:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
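/*
 * PCI AER callback: the slot has been reset. Re-enable the device,
 * bring the queue pairs back online and abort/reinitialize the ISP.
 */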
static pci_ers_result_t
|
|
|
|
qla2xxx_pci_slot_reset(struct pci_dev *pdev)
|
|
|
|
{
|
|
|
|
pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
|
2008-11-06 18:40:51 +00:00
|
|
|
scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
|
|
|
|
struct qla_hw_data *ha = base_vha->hw;
|
2019-05-06 20:52:19 +00:00
|
|
|
int rc;
|
|
|
|
struct qla_qpair *qpair = NULL;
|
2007-12-20 04:28:09 +00:00
|
|
|
|
2021-03-29 08:52:25 +00:00
|
|
|
ql_log(ql_log_warn, base_vha, 0x9004,
|
|
|
|
"Slot Reset.\n");
|
2009-12-16 05:29:46 +00:00
|
|
|
|
2021-03-29 08:52:25 +00:00
|
|
|
ha->pci_error_state = QLA_PCI_SLOT_RESET;
|
2010-01-12 21:02:46 +00:00
|
|
|
/* Workaround: the qla2xxx driver accesses hardware early and
|
|
|
|
* needs the error state to be pci_channel_io_normal;
|
|
|
|
* otherwise mailbox commands time out.
|
|
|
|
*/
|
|
|
|
pdev->error_state = pci_channel_io_normal;
|
|
|
|
|
|
|
|
pci_restore_state(pdev);
|
|
|
|
|
2010-02-18 18:07:29 +00:00
|
|
|
/* pci_restore_state() clears the saved_state flag of the device;
|
|
|
|
* save the restored state again so the saved_state flag is set.
|
|
|
|
*/
|
|
|
|
pci_save_state(pdev);
|
|
|
|
|
2007-12-20 04:28:09 +00:00
|
|
|
if (ha->mem_only)
|
|
|
|
rc = pci_enable_device_mem(pdev);
|
|
|
|
else
|
|
|
|
rc = pci_enable_device(pdev);
|
2007-09-20 21:07:36 +00:00
|
|
|
|
2007-12-20 04:28:09 +00:00
|
|
|
if (rc) {
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_log(ql_log_warn, base_vha, 0x9005,
|
2007-09-20 21:07:36 +00:00
|
|
|
"Can't re-enable PCI device after reset.\n");
|
2010-09-03 22:20:50 +00:00
|
|
|
goto exit_slot_reset;
|
2007-09-20 21:07:36 +00:00
|
|
|
}
|
|
|
|
|
2010-01-12 21:02:46 +00:00
|
|
|
|
2008-11-06 18:40:51 +00:00
|
|
|
if (ha->isp_ops->pci_config(base_vha))
|
2010-09-03 22:20:50 +00:00
|
|
|
goto exit_slot_reset;
|
|
|
|
|
2019-05-06 20:52:19 +00:00
|
|
|
mutex_lock(&ha->mq_lock);
|
|
|
|
list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
|
|
|
|
qpair->online = 1;
|
|
|
|
mutex_unlock(&ha->mq_lock);
|
2009-12-16 05:29:46 +00:00
|
|
|
|
2021-03-29 08:52:25 +00:00
|
|
|
ha->flags.eeh_busy = 0;
|
2019-05-06 20:52:19 +00:00
|
|
|
base_vha->flags.online = 1;
|
2008-11-06 18:40:51 +00:00
|
|
|
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
|
2021-03-29 08:52:25 +00:00
|
|
|
ha->isp_ops->abort_isp(base_vha);
|
2008-11-06 18:40:51 +00:00
|
|
|
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
|
2007-09-20 21:07:36 +00:00
|
|
|
|
2021-03-29 08:52:25 +00:00
|
|
|
if (qla2x00_isp_reg_stat(ha)) {
|
|
|
|
ha->flags.eeh_busy = 1;
|
|
|
|
qla_pci_error_cleanup(base_vha);
|
|
|
|
ql_log(ql_log_warn, base_vha, 0x9005,
|
|
|
|
"Device unable to recover from PCI error.\n");
|
|
|
|
} else {
|
|
|
|
ret = PCI_ERS_RESULT_RECOVERED;
|
|
|
|
}
|
2010-01-12 21:02:46 +00:00
|
|
|
|
2010-09-03 22:20:50 +00:00
|
|
|
exit_slot_reset:
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_dbg(ql_dbg_aer, base_vha, 0x900e,
|
2021-03-29 08:52:25 +00:00
|
|
|
"Slot Reset returning %x.\n", ret);
|
2009-12-16 05:29:46 +00:00
|
|
|
|
2007-09-20 21:07:36 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
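/*
 * PCI AER callback: normal traffic may resume. Wait for the HBA to
 * come back online before declaring recovery complete.
 */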
static void
|
|
|
|
qla2xxx_pci_resume(struct pci_dev *pdev)
|
|
|
|
{
|
2008-11-06 18:40:51 +00:00
|
|
|
scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
|
|
|
|
struct qla_hw_data *ha = base_vha->hw;
|
2007-09-20 21:07:36 +00:00
|
|
|
int ret;
|
|
|
|
|
2021-03-29 08:52:25 +00:00
|
|
|
ql_log(ql_log_warn, base_vha, 0x900f,
|
|
|
|
"Pci Resume.\n");
|
2009-12-16 05:29:46 +00:00
|
|
|
|
2019-05-06 20:52:19 +00:00
|
|
|
|
2008-11-06 18:40:51 +00:00
|
|
|
ret = qla2x00_wait_for_hba_online(base_vha);
|
2007-09-20 21:07:36 +00:00
|
|
|
if (ret != QLA_SUCCESS) {
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_log(ql_log_fatal, base_vha, 0x9002,
|
|
|
|
"The device failed to resume I/O from slot/link_reset.\n");
|
2007-09-20 21:07:36 +00:00
|
|
|
}
|
2021-03-29 08:52:25 +00:00
|
|
|
ha->pci_error_state = QLA_PCI_RESUME;
|
|
|
|
ql_dbg(ql_dbg_aer, base_vha, 0x600d,
|
|
|
|
"Pci Resume returning.\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
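/*
 * Mark the HBA as busy handling an EEH event; only the first caller
 * (decided under work_lock) performs the PCI error cleanup.
 */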
void qla_pci_set_eeh_busy(struct scsi_qla_host *vha)
|
|
|
|
{
|
|
|
|
struct qla_hw_data *ha = vha->hw;
|
|
|
|
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
|
|
|
|
bool do_cleanup = false;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
if (ha->flags.eeh_busy)
|
|
|
|
return;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&base_vha->work_lock, flags);
|
|
|
|
if (!ha->flags.eeh_busy) {
|
|
|
|
ha->flags.eeh_busy = 1;
|
|
|
|
do_cleanup = true;
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&base_vha->work_lock, flags);
|
|
|
|
|
|
|
|
if (do_cleanup)
|
|
|
|
qla_pci_error_cleanup(base_vha);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This routine schedules a task to pause I/O from interrupt context
|
|
|
|
* when the caller sees a PCIe error event (register reads return all 0xf's).
|
|
|
|
*/
|
|
|
|
void qla_schedule_eeh_work(struct scsi_qla_host *vha)
|
|
|
|
{
|
|
|
|
struct qla_hw_data *ha = vha->hw;
|
|
|
|
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
|
|
|
|
|
|
|
|
if (ha->flags.eeh_busy)
|
|
|
|
return;
|
|
|
|
|
|
|
|
set_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags);
|
|
|
|
qla2xxx_wake_dpc(base_vha);
|
2007-09-20 21:07:36 +00:00
|
|
|
}
|
|
|
|
|
2019-01-25 07:23:40 +00:00
|
|
|
static void
|
|
|
|
qla_pci_reset_prepare(struct pci_dev *pdev)
|
|
|
|
{
|
|
|
|
scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
|
|
|
|
struct qla_hw_data *ha = base_vha->hw;
|
|
|
|
struct qla_qpair *qpair;
|
|
|
|
|
|
|
|
ql_log(ql_log_warn, base_vha, 0xffff,
|
|
|
|
"%s.\n", __func__);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* PCI FLR/function reset is about to reset the
|
|
|
|
* slot. Stop the chip to stop all DMA access.
|
|
|
|
* It is assumed that pci_reset_done will be called
|
|
|
|
* after FLR to resume chip operation.
|
|
|
|
*/
|
|
|
|
ha->flags.eeh_busy = 1;
|
|
|
|
mutex_lock(&ha->mq_lock);
|
|
|
|
list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
|
|
|
|
qpair->online = 0;
|
|
|
|
mutex_unlock(&ha->mq_lock);
|
|
|
|
|
|
|
|
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
|
|
|
|
qla2x00_abort_isp_cleanup(base_vha);
|
|
|
|
qla2x00_abort_all_cmds(base_vha, DID_RESET << 16);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
qla_pci_reset_done(struct pci_dev *pdev)
|
|
|
|
{
|
|
|
|
scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
|
|
|
|
struct qla_hw_data *ha = base_vha->hw;
|
|
|
|
struct qla_qpair *qpair;
|
|
|
|
|
|
|
|
ql_log(ql_log_warn, base_vha, 0xffff,
|
|
|
|
"%s.\n", __func__);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* FLR just completed by the PCI layer. Resume the adapter.
|
|
|
|
*/
|
|
|
|
ha->flags.eeh_busy = 0;
|
|
|
|
mutex_lock(&ha->mq_lock);
|
|
|
|
list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
|
|
|
|
qpair->online = 1;
|
|
|
|
mutex_unlock(&ha->mq_lock);
|
|
|
|
|
|
|
|
base_vha->flags.online = 1;
|
|
|
|
ha->isp_ops->abort_isp(base_vha);
|
|
|
|
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
|
|
|
|
}
|
|
|
|
|
2016-12-12 22:40:08 +00:00
|
|
|
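/*
 * Map block-mq hardware queues to interrupt vectors: use the default
 * CPU mapping when the user controls IRQ affinity or the ISP has no
 * MQ I/O base, otherwise map by PCI MSI-X affinity.
 */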
static int qla2xxx_map_queues(struct Scsi_Host *shost)
|
|
|
|
{
|
2017-12-04 22:44:59 +00:00
|
|
|
int rc;
|
2016-12-12 22:40:08 +00:00
|
|
|
scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
|
2019-03-12 01:00:30 +00:00
|
|
|
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
|
2016-12-12 22:40:08 +00:00
|
|
|
|
2019-02-16 00:42:55 +00:00
|
|
|
if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
|
2018-10-29 19:06:14 +00:00
|
|
|
rc = blk_mq_map_queues(qmap);
|
2017-12-04 22:44:59 +00:00
|
|
|
else
|
2019-01-11 17:40:47 +00:00
|
|
|
rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
|
2017-12-04 22:44:59 +00:00
|
|
|
return rc;
|
2016-12-12 22:40:08 +00:00
|
|
|
}
|
|
|
|
|
2019-04-04 19:44:43 +00:00
|
|
|
struct scsi_host_template qla2xxx_driver_template = {
|
|
|
|
.module = THIS_MODULE,
|
|
|
|
.name = QLA2XXX_DRIVER_NAME,
|
|
|
|
.queuecommand = qla2xxx_queuecommand,
|
|
|
|
|
|
|
|
.eh_timed_out = fc_eh_timed_out,
|
|
|
|
.eh_abort_handler = qla2xxx_eh_abort,
|
2021-04-27 05:09:14 +00:00
|
|
|
.eh_should_retry_cmd = fc_eh_should_retry_cmd,
|
2019-04-04 19:44:43 +00:00
|
|
|
.eh_device_reset_handler = qla2xxx_eh_device_reset,
|
|
|
|
.eh_target_reset_handler = qla2xxx_eh_target_reset,
|
|
|
|
.eh_bus_reset_handler = qla2xxx_eh_bus_reset,
|
|
|
|
.eh_host_reset_handler = qla2xxx_eh_host_reset,
|
|
|
|
|
|
|
|
.slave_configure = qla2xxx_slave_configure,
|
|
|
|
|
|
|
|
.slave_alloc = qla2xxx_slave_alloc,
|
|
|
|
.slave_destroy = qla2xxx_slave_destroy,
|
|
|
|
.scan_finished = qla2xxx_scan_finished,
|
|
|
|
.scan_start = qla2xxx_scan_start,
|
|
|
|
.change_queue_depth = scsi_change_queue_depth,
|
|
|
|
.map_queues = qla2xxx_map_queues,
|
|
|
|
.this_id = -1,
|
|
|
|
.cmd_per_lun = 3,
|
|
|
|
.sg_tablesize = SG_ALL,
|
|
|
|
|
|
|
|
.max_sectors = 0xFFFF,
|
|
|
|
.shost_attrs = qla2x00_host_attrs,
|
|
|
|
|
|
|
|
.supported_mode = MODE_INITIATOR,
|
|
|
|
.track_queue_depth = 1,
|
2019-08-09 03:02:06 +00:00
|
|
|
.cmd_size = sizeof(srb_t),
|
2019-04-04 19:44:43 +00:00
|
|
|
};
|
|
|
|
|
2012-09-07 16:33:16 +00:00
|
|
|
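/* PCI error-recovery (AER) and FLR hooks registered with the PCI core. */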
static const struct pci_error_handlers qla2xxx_err_handler = {
|
2007-09-20 21:07:36 +00:00
|
|
|
.error_detected = qla2xxx_pci_error_detected,
|
|
|
|
.mmio_enabled = qla2xxx_pci_mmio_enabled,
|
|
|
|
.slot_reset = qla2xxx_pci_slot_reset,
|
|
|
|
.resume = qla2xxx_pci_resume,
|
2019-01-25 07:23:40 +00:00
|
|
|
.reset_prepare = qla_pci_reset_prepare,
|
|
|
|
.reset_done = qla_pci_reset_done,
|
2007-09-20 21:07:36 +00:00
|
|
|
};
|
|
|
|
|
2005-11-09 23:49:04 +00:00
|
|
|
static struct pci_device_id qla2xxx_pci_tbl[] = {
|
2006-05-17 22:09:39 +00:00
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
|
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
|
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
|
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
|
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
|
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
|
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
|
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
|
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
|
2008-04-03 20:13:26 +00:00
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
|
2006-05-17 22:09:39 +00:00
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
|
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
|
2007-07-20 03:37:34 +00:00
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
|
2012-02-09 19:15:34 +00:00
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
|
2009-01-05 19:18:11 +00:00
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
|
2010-04-13 00:59:55 +00:00
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
|
2012-08-22 18:20:55 +00:00
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
|
2013-03-28 12:21:23 +00:00
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
|
2013-08-27 05:37:28 +00:00
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
|
2014-02-26 09:15:06 +00:00
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
|
2014-04-11 20:54:13 +00:00
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
|
2015-08-04 17:38:03 +00:00
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) },
|
2019-03-12 18:08:13 +00:00
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2061) },
|
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2081) },
|
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2281) },
|
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2089) },
|
|
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2289) },
|
2005-11-09 23:49:04 +00:00
|
|
|
{ 0 },
|
|
|
|
};
|
|
|
|
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
|
|
|
|
|
2005-07-06 17:31:47 +00:00
|
|
|
static struct pci_driver qla2xxx_pci_driver = {
|
2006-05-17 22:09:45 +00:00
|
|
|
.name = QLA2XXX_DRIVER_NAME,
|
2005-12-01 18:51:50 +00:00
|
|
|
.driver = {
|
|
|
|
.owner = THIS_MODULE,
|
|
|
|
},
|
2005-07-06 17:31:47 +00:00
|
|
|
.id_table = qla2xxx_pci_tbl,
|
2006-06-23 23:11:22 +00:00
|
|
|
.probe = qla2x00_probe_one,
|
2008-01-14 08:55:16 +00:00
|
|
|
.remove = qla2x00_remove_one,
|
2010-10-15 18:27:46 +00:00
|
|
|
.shutdown = qla2x00_shutdown,
|
2007-09-20 21:07:36 +00:00
|
|
|
.err_handler = &qla2xxx_err_handler,
|
2005-07-06 17:31:47 +00:00
|
|
|
};
|
|
|
|
|
2013-04-04 23:09:41 +00:00
|
|
|
static const struct file_operations apidev_fops = {
|
2010-05-04 22:01:24 +00:00
|
|
|
.owner = THIS_MODULE,
|
2010-08-15 16:52:59 +00:00
|
|
|
.llseek = noop_llseek,
|
2010-05-04 22:01:24 +00:00
|
|
|
};
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/**
|
|
|
|
* qla2x00_module_init - Module initialization.
|
|
|
|
**/
|
|
|
|
static int __init
|
|
|
|
qla2x00_module_init(void)
|
|
|
|
{
|
2005-07-06 17:31:47 +00:00
|
|
|
int ret = 0;
|
|
|
|
|
2020-05-18 21:17:02 +00:00
|
|
|
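/*
 * Compile-time checks that the firmware interface structures keep the
 * exact sizes the firmware interface requires.
 */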
BUILD_BUG_ON(sizeof(cmd_a64_entry_t) != 64);
|
2019-04-17 21:44:37 +00:00
|
|
|
BUILD_BUG_ON(sizeof(cmd_entry_t) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(cont_entry_t) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(init_cb_t) != 96);
|
2020-05-18 21:17:02 +00:00
|
|
|
BUILD_BUG_ON(sizeof(mrk_entry_t) != 64);
|
2019-04-17 21:44:37 +00:00
|
|
|
BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(request_t) != 64);
|
2020-05-18 21:17:02 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct abort_entry_24xx) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(struct abort_iocb_entry_fx00) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(struct abts_entry_24xx) != 64);
|
2019-04-17 21:44:37 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64);
|
2020-05-18 21:17:02 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct access_chip_rsp_84xx) != 64);
|
2019-04-17 21:44:37 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(struct cmd_type_7) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64);
|
2021-08-10 04:37:11 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2604);
|
2020-05-18 21:17:02 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct ct_fdmi2_hba_attributes) != 4424);
|
|
|
|
BUILD_BUG_ON(sizeof(struct ct_fdmi2_port_attributes) != 4164);
|
|
|
|
BUILD_BUG_ON(sizeof(struct ct_fdmi_hba_attr) != 260);
|
|
|
|
BUILD_BUG_ON(sizeof(struct ct_fdmi_port_attr) != 260);
|
|
|
|
BUILD_BUG_ON(sizeof(struct ct_rsp_hdr) != 16);
|
2019-04-17 21:44:37 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64);
|
2020-05-18 21:17:02 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct device_reg_24xx) != 256);
|
|
|
|
BUILD_BUG_ON(sizeof(struct device_reg_25xxmq) != 24);
|
|
|
|
BUILD_BUG_ON(sizeof(struct device_reg_2xxx) != 256);
|
|
|
|
BUILD_BUG_ON(sizeof(struct device_reg_82xx) != 1288);
|
|
|
|
BUILD_BUG_ON(sizeof(struct device_reg_fx00) != 216);
|
2019-04-17 21:44:37 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64);
|
2020-05-18 21:17:02 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct els_sts_entry_24xx) != 64);
|
2019-04-17 21:44:37 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64);
|
2020-05-18 21:17:02 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct imm_ntfy_from_isp) != 64);
|
2019-04-17 21:44:37 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128);
|
|
|
|
BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128);
|
2020-05-18 21:17:02 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct logio_entry_24xx) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(struct mbx_entry) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(struct mid_init_cb_24xx) != 5252);
|
|
|
|
BUILD_BUG_ON(sizeof(struct mrk_entry_24xx) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(struct nvram_24xx) != 512);
|
|
|
|
BUILD_BUG_ON(sizeof(struct nvram_81xx) != 512);
|
2019-04-17 21:44:37 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64);
|
2020-05-18 21:17:02 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct pt_ls4_rx_unsol) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(struct purex_entry_24xx) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(struct qla2100_fw_dump) != 123634);
|
|
|
|
BUILD_BUG_ON(sizeof(struct qla2300_fw_dump) != 136100);
|
|
|
|
BUILD_BUG_ON(sizeof(struct qla24xx_fw_dump) != 37976);
|
|
|
|
BUILD_BUG_ON(sizeof(struct qla25xx_fw_dump) != 39228);
|
|
|
|
BUILD_BUG_ON(sizeof(struct qla2xxx_fce_chain) != 52);
|
|
|
|
BUILD_BUG_ON(sizeof(struct qla2xxx_fw_dump) != 136172);
|
|
|
|
BUILD_BUG_ON(sizeof(struct qla2xxx_mq_chain) != 524);
|
|
|
|
BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_chain) != 8);
|
|
|
|
BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_header) != 12);
|
|
|
|
BUILD_BUG_ON(sizeof(struct qla2xxx_offld_chain) != 24);
|
|
|
|
BUILD_BUG_ON(sizeof(struct qla81xx_fw_dump) != 39420);
|
|
|
|
BUILD_BUG_ON(sizeof(struct qla82xx_uri_data_desc) != 28);
|
|
|
|
BUILD_BUG_ON(sizeof(struct qla82xx_uri_table_desc) != 32);
|
|
|
|
BUILD_BUG_ON(sizeof(struct qla83xx_fw_dump) != 51196);
|
2020-05-18 21:17:04 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct qla_fcp_prio_cfg) != FCP_PRIO_CFG_SIZE);
|
2020-05-18 21:17:02 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct qla_fdt_layout) != 128);
|
2020-05-18 21:17:01 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8);
|
|
|
|
BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16);
|
2020-05-18 21:17:02 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct qla_npiv_entry) != 24);
|
|
|
|
BUILD_BUG_ON(sizeof(struct qla_npiv_header) != 16);
|
|
|
|
BUILD_BUG_ON(sizeof(struct rdp_rsp_payload) != 336);
|
2019-04-17 21:44:37 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064);
|
2020-05-18 21:17:02 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct sts_entry_24xx) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry_fx00) != 64);
|
2019-04-17 21:44:37 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64);
|
2020-05-18 21:17:02 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct verify_chip_rsp_84xx) != 52);
|
2019-04-17 21:44:37 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56);
|
2020-05-18 21:17:02 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct vp_config_entry_24xx) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(struct vp_ctrl_entry_24xx) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(struct vp_rpt_id_entry_24xx) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(sts21_entry_t) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(sts22_entry_t) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(sts_cont_entry_t) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(sts_entry_t) != 64);
|
|
|
|
BUILD_BUG_ON(sizeof(sw_info_t) != 32);
|
|
|
|
BUILD_BUG_ON(sizeof(target_id_t) != 2);
|
2019-04-17 21:44:37 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/* Allocate cache for SRBs. */
|
2005-04-23 06:47:27 +00:00
|
|
|
srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
|
2007-07-20 01:11:58 +00:00
|
|
|
SLAB_HWCACHE_ALIGN, NULL);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (srb_cachep == NULL) {
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_log(ql_log_fatal, NULL, 0x0001,
|
|
|
|
"Unable to allocate SRB cache...Failing load!.\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2012-05-15 18:34:28 +00:00
|
|
|
/* Initialize target kmem_cache and mem_pools */
|
|
|
|
ret = qlt_init();
|
|
|
|
if (ret < 0) {
|
2019-04-04 19:44:46 +00:00
|
|
|
goto destroy_cache;
|
2012-05-15 18:34:28 +00:00
|
|
|
} else if (ret > 0) {
|
|
|
|
/*
|
|
|
|
* If initiator mode is explicitly disabled by qlt_init(),
|
|
|
|
* prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
|
|
|
|
* performing scsi_scan_target() during LOOP UP event.
|
|
|
|
*/
|
|
|
|
qla2xxx_transport_functions.disable_target_scan = 1;
|
|
|
|
qla2xxx_transport_vport_functions.disable_target_scan = 1;
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/* Derive version string. */
|
|
|
|
strcpy(qla2x00_version_str, QLA2XXX_VERSION);
|
2006-10-06 16:54:59 +00:00
|
|
|
if (ql2xextended_error_logging)
|
2006-06-23 23:11:10 +00:00
|
|
|
strcat(qla2x00_version_str, "-debug");
|
2017-08-23 22:05:10 +00:00
|
|
|
if (ql2xextended_error_logging == 1)
|
|
|
|
ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
|
2006-06-23 23:11:10 +00:00
|
|
|
|
2018-09-11 17:18:18 +00:00
|
|
|
if (ql2x_ini_mode == QLA2XXX_INI_MODE_DUAL)
|
|
|
|
qla_insert_tgt_attrs();
|
|
|
|
|
2005-04-21 20:13:36 +00:00
|
|
|
qla2xxx_transport_template =
|
|
|
|
fc_attach_transport(&qla2xxx_transport_functions);
|
2007-07-05 20:16:51 +00:00
|
|
|
if (!qla2xxx_transport_template) {
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_log(ql_log_fatal, NULL, 0x0002,
|
|
|
|
"fc_attach_transport failed...Failing load!.\n");
|
2019-04-04 19:44:46 +00:00
|
|
|
ret = -ENODEV;
|
|
|
|
goto qlt_exit;
|
2007-07-05 20:16:51 +00:00
|
|
|
}
|
2010-05-04 22:01:24 +00:00
|
|
|
|
|
|
|
apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
|
|
|
|
if (apidev_major < 0) {
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_log(ql_log_fatal, NULL, 0x0003,
|
|
|
|
"Unable to register char device %s.\n", QLA2XXX_APIDEV);
|
2010-05-04 22:01:24 +00:00
|
|
|
}
|
|
|
|
|
2007-07-05 20:16:51 +00:00
|
|
|
qla2xxx_transport_vport_template =
|
|
|
|
fc_attach_transport(&qla2xxx_transport_vport_functions);
|
|
|
|
if (!qla2xxx_transport_vport_template) {
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_log(ql_log_fatal, NULL, 0x0004,
|
|
|
|
"fc_attach_transport vport failed...Failing load!.\n");
|
2019-04-04 19:44:46 +00:00
|
|
|
ret = -ENODEV;
|
|
|
|
goto unreg_chrdev;
|
2007-07-05 20:16:51 +00:00
|
|
|
}
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_log(ql_log_info, NULL, 0x0005,
|
|
|
|
"QLogic Fibre Channel HBA Driver: %s.\n",
|
2008-05-13 05:21:08 +00:00
|
|
|
qla2x00_version_str);
|
2006-06-23 23:11:22 +00:00
|
|
|
ret = pci_register_driver(&qla2xxx_pci_driver);
|
2005-07-06 17:31:47 +00:00
|
|
|
if (ret) {
|
2011-07-14 19:00:13 +00:00
|
|
|
ql_log(ql_log_fatal, NULL, 0x0006,
|
|
|
|
"pci_register_driver failed...ret=%d Failing load!.\n",
|
|
|
|
ret);
|
2019-04-04 19:44:46 +00:00
|
|
|
goto release_vport_transport;
|
2005-07-06 17:31:47 +00:00
|
|
|
}
|
|
|
|
return ret;
|
2019-04-04 19:44:46 +00:00
|
|
|
|
|
|
|
release_vport_transport:
|
|
|
|
fc_release_transport(qla2xxx_transport_vport_template);
|
|
|
|
|
|
|
|
unreg_chrdev:
|
|
|
|
if (apidev_major >= 0)
|
|
|
|
unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
|
|
|
|
fc_release_transport(qla2xxx_transport_template);
|
|
|
|
|
|
|
|
qlt_exit:
|
|
|
|
qlt_exit();
|
|
|
|
|
|
|
|
destroy_cache:
|
|
|
|
kmem_cache_destroy(srb_cachep);
|
|
|
|
return ret;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qla2x00_module_exit - Module cleanup.
|
|
|
|
**/
|
|
|
|
static void __exit
|
|
|
|
qla2x00_module_exit(void)
|
|
|
|
{
|
2006-06-23 23:11:22 +00:00
|
|
|
pci_unregister_driver(&qla2xxx_pci_driver);
|
2005-11-09 23:49:04 +00:00
|
|
|
qla2x00_release_firmware();
|
2018-12-02 20:52:11 +00:00
|
|
|
kmem_cache_destroy(ctx_cachep);
|
2007-07-05 20:16:51 +00:00
|
|
|
fc_release_transport(qla2xxx_transport_vport_template);
|
2019-04-04 19:44:47 +00:00
|
|
|
if (apidev_major >= 0)
|
|
|
|
unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
|
|
|
|
fc_release_transport(qla2xxx_transport_template);
|
|
|
|
qlt_exit();
|
|
|
|
kmem_cache_destroy(srb_cachep);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
module_init(qla2x00_module_init);
|
|
|
|
module_exit(qla2x00_module_exit);
|
|
|
|
|
|
|
|
MODULE_AUTHOR("QLogic Corporation");
|
|
|
|
MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
|
|
|
|
MODULE_LICENSE("GPL");
|
2006-10-02 19:00:48 +00:00
|
|
|
MODULE_FIRMWARE(FW_FILE_ISP21XX);
|
|
|
|
MODULE_FIRMWARE(FW_FILE_ISP22XX);
|
|
|
|
MODULE_FIRMWARE(FW_FILE_ISP2300);
|
|
|
|
MODULE_FIRMWARE(FW_FILE_ISP2322);
|
|
|
|
MODULE_FIRMWARE(FW_FILE_ISP24XX);
|
2008-01-31 20:33:45 +00:00
|
|
|
MODULE_FIRMWARE(FW_FILE_ISP25XX);
|