diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index dbeb8a0d7175..7ef9b843d529 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -239,8 +239,8 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 	buf += "#include <target/target_core_configfs.h>\n"
 	buf += "#include <target/target_core_base.h>\n"
 	buf += "#include <target/configfs_macros.h>\n\n"
-	buf += "#include <" + fabric_mod_name + "_base.h>\n"
-	buf += "#include <" + fabric_mod_name + "_fabric.h>\n\n"
+	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
+	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
 
 	buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
 	buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
@@ -289,6 +289,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 	buf += "{\n"
 	buf += "	struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
 	buf += "				struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
+	buf += "	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
 	buf += "	kfree(nacl);\n"
 	buf += "}\n\n"
 
@@ -583,9 +584,9 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 	buf += "#include <target/target_core_fabric_lib.h>\n"
 	buf += "#include <target/target_core_device.h>\n"
 	buf += "#include <target/target_core_tpg.h>\n"
-	buf += "#include <target/target_core_configfs.h>\n"
-	buf += "#include <" + fabric_mod_name + "_base.h>\n"
-	buf += "#include <" + fabric_mod_name + "_fabric.h>\n\n"
+	buf += "#include <target/target_core_configfs.h>\n\n"
+	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
+	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
 
 	buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
 	buf += "{\n"
@@ -973,14 +974,13 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
 
 	buf = ""
-	f = fabric_mod_dir_var + "/Kbuild"
+	f = fabric_mod_dir_var + "/Makefile"
 	print "Writing file: " + f
 
 	p = open(f, 'w')
 	if not p:
 		tcm_mod_err("Unable to open file: " + f)
 
-	buf = "EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/include/ -I$(srctree)/drivers/scsi/ -I$(srctree)/include/scsi/ -I$(srctree)/drivers/target/" + fabric_mod_name + "\n\n"
 	buf += fabric_mod_name + "-objs			:= " + fabric_mod_name + "_fabric.o \\\n"
 	buf += "					   " + fabric_mod_name + "_configfs.o\n"
 	buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ")		+= " + fabric_mod_name + ".o\n"
@@ -1018,7 +1018,7 @@ def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
 
 def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
 	buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ")	+= " + fabric_mod_name.lower() + "/\n"
-	kbuild = tcm_dir + "/drivers/target/Kbuild"
+	kbuild = tcm_dir + "/drivers/target/Makefile"
 
 	f = open(kbuild, 'a')
 	f.write(buf)
@@ -1064,7 +1064,7 @@ def main(modname, proto_ident):
 	tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
 	tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
 
-	input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kbuild..? [yes,no]: ")
+	input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
 	if input == "yes" or input == "y":
 		tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
 
diff --git a/drivers/scsi/aacraid/Makefile b/drivers/scsi/aacraid/Makefile
index 92df4d6b6147..1bd9fd18f7f3 100644
--- a/drivers/scsi/aacraid/Makefile
+++ b/drivers/scsi/aacraid/Makefile
@@ -3,6 +3,6 @@
 obj-$(CONFIG_SCSI_AACRAID) := aacraid.o
 
 aacraid-objs	:= linit.o aachba.o commctrl.o comminit.o commsup.o \
-		   dpcsup.o rx.o sa.o rkt.o nark.o
+		   dpcsup.o rx.o sa.o rkt.o nark.o src.o
 
 ccflags-y	:= -Idrivers/scsi
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 7df2dd1d2c6f..118ce83a737c 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -5,7 +5,8 @@
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1486,7 +1487,9 @@ int aac_get_adapter_info(struct aac_dev* dev)
 			dev->a_ops.adapter_write = aac_write_block;
 		}
 		dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
-		if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
+		if (dev->adapter_info.options & AAC_OPT_NEW_COMM_TYPE1)
+			dev->adapter_info.options |= AAC_OPT_NEW_COMM;
+		if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
 			/*
 			 * Worst case size that could cause sg overflow when
 			 * we break up SG elements that are larger than 64KB.
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 4dbcc055ac78..29ab00016b78 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
  *----------------------------------------------------------------------------*/
 
 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 26400
+# define AAC_DRIVER_BUILD 28000
 # define AAC_DRIVER_BRANCH "-ms"
 #endif
 #define MAXIMUM_NUM_CONTAINERS	32
@@ -277,6 +277,16 @@ enum aac_queue_types {
 
 #define		FsaNormal	1
 
+/* transport FIB header (PMC) */
+struct aac_fib_xporthdr {
+	u64	HostAddress;	/* FIB host address w/o xport header */
+	u32	Size;		/* FIB size excluding xport header */
+	u32	Handle;		/* driver handle to reference the FIB */
+	u64	Reserved[2];
+};
+
+#define		ALIGN32		32
+
 /*
  * Define the FIB. The FIB is the where all the requested data and
  * command information are put to the application on the FSA adapter.
@@ -394,7 +404,9 @@ enum fib_xfer_state {
 	AdapterMicroFib			= (1<<17),
 	BIOSFibPath			= (1<<18),
 	FastResponseCapable		= (1<<19),
-	ApiFib				= (1<<20)	// Its an API Fib.
+	ApiFib				= (1<<20),	/* It's an API Fib */
+	/* PMC NEW COMM: There is no more AIF data pending */
+	NoMoreAifDataAvailable		= (1<<21)
 };
 
 /*
@@ -404,6 +416,7 @@ enum fib_xfer_state {
 
 #define ADAPTER_INIT_STRUCT_REVISION		3
 #define ADAPTER_INIT_STRUCT_REVISION_4		4 // rocket science
+#define ADAPTER_INIT_STRUCT_REVISION_6		6 /* PMC src */
 
 struct aac_init
 {
@@ -428,9 +441,15 @@ struct aac_init
 #define INITFLAGS_NEW_COMM_SUPPORTED	0x00000001
 #define INITFLAGS_DRIVER_USES_UTC_TIME	0x00000010
 #define INITFLAGS_DRIVER_SUPPORTS_PM	0x00000020
+#define INITFLAGS_NEW_COMM_TYPE1_SUPPORTED	0x00000041
 	__le32	MaxIoCommands;	/* max outstanding commands */
 	__le32	MaxIoSize;	/* largest I/O command */
 	__le32	MaxFibSize;	/* largest FIB to adapter */
+	/* ADAPTER_INIT_STRUCT_REVISION_5 begins here */
+	__le32	MaxNumAif;	/* max number of aif */
+	/* ADAPTER_INIT_STRUCT_REVISION_6 begins here */
+	__le32	HostRRQ_AddrLow;
+	__le32	HostRRQ_AddrHigh;	/* Host RRQ (response queue) for SRC */
 };
 
 enum aac_log_level {
@@ -685,7 +704,7 @@ struct rx_inbound {
 #define OutboundDoorbellReg	MUnit.ODR
 
 struct rx_registers {
-	struct rx_mu_registers		MUnit;		/* 1300h - 1344h */
+	struct rx_mu_registers		MUnit;		/* 1300h - 1347h */
 	__le32				reserved1[2];	/* 1348h - 134ch */
 	struct rx_inbound		IndexRegs;
 };
@@ -703,7 +722,7 @@ struct rx_registers {
 #define rkt_inbound rx_inbound
 
 struct rkt_registers {
-	struct rkt_mu_registers		MUnit;		 /* 1300h - 1344h */
+	struct rkt_mu_registers		MUnit;		 /* 1300h - 1347h */
 	__le32				reserved1[1006]; /* 1348h - 22fch */
 	struct rkt_inbound		IndexRegs;	 /* 2300h - */
 };
@@ -713,6 +732,44 @@ struct rkt_registers {
 #define rkt_writeb(AEP, CSR, value)	writeb(value, &((AEP)->regs.rkt->CSR))
 #define rkt_writel(AEP, CSR, value)	writel(value, &((AEP)->regs.rkt->CSR))
 
+/*
+ * PMC SRC message unit registers
+ */
+
+#define src_inbound rx_inbound
+
+struct src_mu_registers {
+				/*	PCI*| Name */
+	__le32	reserved0[8];	/*	00h | Reserved */
+	__le32	IDR;		/*	20h | Inbound Doorbell Register */
+	__le32	IISR;		/*	24h | Inbound Int. Status Register */
+	__le32	reserved1[3];	/*	28h | Reserved */
+	__le32	OIMR;		/*	34h | Outbound Int. Mask Register */
+	__le32	reserved2[25];	/*	38h | Reserved */
+	__le32	ODR_R;		/*	9ch | Outbound Doorbell Read */
+	__le32	ODR_C;		/*	a0h | Outbound Doorbell Clear */
+	__le32	reserved3[6];	/*	a4h | Reserved */
+	__le32	OMR;		/*	bch | Outbound Message Register */
+	__le32	IQ_L;		/*  c0h | Inbound Queue (Low address) */
+	__le32	IQ_H;		/*  c4h | Inbound Queue (High address) */
+};
+
+struct src_registers {
+	struct src_mu_registers MUnit;	/* 00h - c7h */
+	__le32 reserved1[130790];	/* c8h - 7fc5fh */
+	struct src_inbound IndexRegs;	/* 7fc60h */
+};
+
+#define src_readb(AEP, CSR)		readb(&((AEP)->regs.src.bar0->CSR))
+#define src_readl(AEP, CSR)		readl(&((AEP)->regs.src.bar0->CSR))
+#define src_writeb(AEP, CSR, value)	writeb(value, \
+						&((AEP)->regs.src.bar0->CSR))
+#define src_writel(AEP, CSR, value)	writel(value, \
+						&((AEP)->regs.src.bar0->CSR))
+
+#define SRC_ODR_SHIFT		12
+#define SRC_IDR_SHIFT		9
+
 typedef void (*fib_callback)(void *ctxt, struct fib *fibctx);
 
 struct aac_fib_context {
@@ -879,6 +936,7 @@ struct aac_supplement_adapter_info
 #define AAC_OPTION_MU_RESET		cpu_to_le32(0x00000001)
 #define AAC_OPTION_IGNORE_RESET		cpu_to_le32(0x00000002)
 #define AAC_OPTION_POWER_MANAGEMENT	cpu_to_le32(0x00000004)
+#define AAC_OPTION_DOORBELL_RESET	cpu_to_le32(0x00004000)
 #define AAC_SIS_VERSION_V3	3
 #define AAC_SIS_SLOT_UNKNOWN	0xFF
 
@@ -940,6 +998,7 @@ struct aac_bus_info_response {
 #define AAC_OPT_SUPPLEMENT_ADAPTER_INFO	cpu_to_le32(1<<16)
 #define AAC_OPT_NEW_COMM		cpu_to_le32(1<<17)
 #define AAC_OPT_NEW_COMM_64		cpu_to_le32(1<<18)
+#define AAC_OPT_NEW_COMM_TYPE1		cpu_to_le32(1<<28)
 
 struct aac_dev
 {
@@ -952,6 +1011,7 @@ struct aac_dev
 	 */
 	unsigned		max_fib_size;
 	unsigned		sg_tablesize;
+	unsigned		max_num_aif;
 
 	/*
 	 *	Map for 128 fib objects (64k)
@@ -980,10 +1040,21 @@ struct aac_dev
 	struct adapter_ops	a_ops;
 	unsigned long		fsrev;		/* Main driver's revision number */
 
-	unsigned		base_size;	/* Size of mapped in region */
+	unsigned long		dbg_base;	/* address of UART
+						 * debug buffer */
+
+	unsigned		base_size, dbg_size;	/* Size of
+							 *  mapped in region */
+
 	struct aac_init		*init;		/* Holds initialization info to communicate with adapter */
 	dma_addr_t		init_pa;	/* Holds physical address of the init struct */
 
+	u32			*host_rrq;	/* response queue
+						 * if AAC_COMM_MESSAGE_TYPE1 */
+
+	dma_addr_t		host_rrq_pa;	/* phys. address */
+	u32			host_rrq_idx;	/* index into rrq buffer */
+
 	struct pci_dev		*pdev;		/* Our PCI interface */
 	void *			printfbuf;	/* pointer to buffer used for printf's from the adapter */
 	void *			comm_addr;	/* Base address of Comm area */
@@ -1003,14 +1074,20 @@ struct aac_dev
 	 */
 #ifndef AAC_MIN_FOOTPRINT_SIZE
 #	define AAC_MIN_FOOTPRINT_SIZE 8192
+#	define AAC_MIN_SRC_BAR0_SIZE 0x400000
+#	define AAC_MIN_SRC_BAR1_SIZE 0x800
 #endif
 	union
 	{
 		struct sa_registers __iomem *sa;
 		struct rx_registers __iomem *rx;
 		struct rkt_registers __iomem *rkt;
+		struct {
+			struct src_registers __iomem *bar0;
+			char __iomem *bar1;
+		} src;
 	} regs;
-	volatile void __iomem *base;
+	volatile void __iomem *base, *dbg_base_mapped;
 	volatile struct rx_inbound __iomem *IndexRegs;
 	u32			OIMR; /* Mask Register Cache */
 	/*
@@ -1031,9 +1108,8 @@ struct aac_dev
 	u8			comm_interface;
 #	define AAC_COMM_PRODUCER 0
 #	define AAC_COMM_MESSAGE  1
-	/* macro side-effects BEWARE */
-#	define			raw_io_interface \
-	  init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4)
+#	define AAC_COMM_MESSAGE_TYPE1	3
+	u8			raw_io_interface;
 	u8			raw_io_64;
 	u8			printf_enabled;
 	u8			in_reset;
@@ -1789,6 +1865,10 @@ extern struct aac_common aac_config;
 #define DoorBellAdapterNormCmdNotFull	(1<<3)	/* Adapter -> Host */
 #define DoorBellAdapterNormRespNotFull	(1<<4)	/* Adapter -> Host */
 #define DoorBellPrintfReady		(1<<5)	/* Adapter -> Host */
+#define DoorBellAifPending		(1<<6)	/* Adapter -> Host */
+
+/* PMC specific outbound doorbell bits */
+#define PmDoorBellResponseSent		(1<<1)	/* Adapter -> Host */
 
 /*
  *	For FIB communication, we need all of the following things
@@ -1831,6 +1911,9 @@ extern struct aac_common aac_config;
 #define		AifReqAPIJobUpdate	109	/* Update a job report from the API */
 #define		AifReqAPIJobFinish	110	/* Finish a job from the API */
 
+/* PMC NEW COMM: Request the event data */
+#define		AifReqEvent		200
+
 /*
  *	Adapter Initiated FIB command structures. Start with the adapter
  *	initiated FIBs that really come from the adapter, and get responded
@@ -1886,10 +1969,13 @@ int aac_rx_init(struct aac_dev *dev);
 int aac_rkt_init(struct aac_dev *dev);
 int aac_nark_init(struct aac_dev *dev);
 int aac_sa_init(struct aac_dev *dev);
+int aac_src_init(struct aac_dev *dev);
 int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify);
 unsigned int aac_response_normal(struct aac_queue * q);
 unsigned int aac_command_normal(struct aac_queue * q);
-unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index);
+unsigned int aac_intr_normal(struct aac_dev *dev, u32 Index,
+			int isAif, int isFastResponse,
+			struct hw_fib *aif_fib);
 int aac_reset_adapter(struct aac_dev * dev, int forced);
 int aac_check_health(struct aac_dev * dev);
 int aac_command_thread(void *data);
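The new aac_fib_xporthdr above is exactly 32 bytes (8 + 4 + 4 + 16, with no padding), which is why the header also introduces ALIGN32: every hardware FIB handed to a PMC SRC controller is preceded by one of these headers on a 32-byte boundary. A minimal userspace sketch of the layout and the round-up arithmetic, with <stdint.h> types standing in for the kernel's u64/u32 and an arbitrary example address:

#include <stdio.h>
#include <stdint.h>

/* mirror of struct aac_fib_xporthdr using fixed-width types */
struct aac_fib_xporthdr {
	uint64_t HostAddress;	/* FIB host address w/o xport header */
	uint32_t Size;		/* FIB size excluding xport header */
	uint32_t Handle;	/* driver handle to reference the FIB */
	uint64_t Reserved[2];
};

#define ALIGN32 32

int main(void)
{
	uint64_t pa = 0x1000000cULL;	/* arbitrary unaligned DMA address */
	/* same round-up aac_fib_setup() applies to the pool base */
	uint64_t aligned = (pa + (ALIGN32 - 1)) & ~(uint64_t)(ALIGN32 - 1);

	printf("sizeof(xporthdr) = %zu\n", sizeof(struct aac_fib_xporthdr));
	printf("0x%llx aligned up to 0x%llx\n",
	       (unsigned long long)pa, (unsigned long long)aligned);
	return 0;
}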
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 645ddd9d9b9e..8a0b33033177 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -5,7 +5,8 @@
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index a7261486ccd4..7ac8fdb5577b 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -5,7 +5,8 @@
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -52,12 +53,16 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
 	unsigned long size, align;
 	const unsigned long fibsize = 4096;
 	const unsigned long printfbufsiz = 256;
+	unsigned long host_rrq_size = 0;
 	struct aac_init *init;
 	dma_addr_t phys;
 	unsigned long aac_max_hostphysmempages;
 
-	size = fibsize + sizeof(struct aac_init) + commsize + commalign + printfbufsiz;
-
+	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1)
+		host_rrq_size = (dev->scsi_host_ptr->can_queue
+			+ AAC_NUM_MGT_FIB) * sizeof(u32);
+	size = fibsize + sizeof(struct aac_init) + commsize +
+			commalign + printfbufsiz + host_rrq_size;
  
 	base = pci_alloc_consistent(dev->pdev, size, &phys);
 
@@ -70,8 +75,14 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
 	dev->comm_phys = phys;
 	dev->comm_size = size;
 	
-	dev->init = (struct aac_init *)(base + fibsize);
-	dev->init_pa = phys + fibsize;
+	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
+		dev->host_rrq = (u32 *)(base + fibsize);
+		dev->host_rrq_pa = phys + fibsize;
+		memset(dev->host_rrq, 0, host_rrq_size);
+	}
+
+	dev->init = (struct aac_init *)(base + fibsize + host_rrq_size);
+	dev->init_pa = phys + fibsize + host_rrq_size;
 
 	init = dev->init;
 
@@ -106,8 +117,13 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
 
 	init->InitFlags = 0;
 	if (dev->comm_interface == AAC_COMM_MESSAGE) {
-		init->InitFlags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
+		init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
 		dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n"));
+	} else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
+		init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6);
+		init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_TYPE1_SUPPORTED);
+		dprintk((KERN_WARNING
+			"aacraid: New Comm Interface type1 enabled\n"));
 	}
 	init->InitFlags |= cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
 				       INITFLAGS_DRIVER_SUPPORTS_PM);
@@ -115,11 +131,18 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
 	init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
 	init->MaxFibSize = cpu_to_le32(dev->max_fib_size);
 
+	init->MaxNumAif = cpu_to_le32(dev->max_num_aif);
+	init->HostRRQ_AddrHigh = (u32)((u64)dev->host_rrq_pa >> 32);
+	init->HostRRQ_AddrLow = (u32)(dev->host_rrq_pa & 0xffffffff);
+
 	/*
 	 * Increment the base address by the amount already used
 	 */
-	base = base + fibsize + sizeof(struct aac_init);
-	phys = (dma_addr_t)((ulong)phys + fibsize + sizeof(struct aac_init));
+	base = base + fibsize + host_rrq_size + sizeof(struct aac_init);
+	phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size +
+		sizeof(struct aac_init));
+
 	/*
 	 *	Align the beginning of Headers to commalign
 	 */
@@ -314,15 +337,22 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 		- sizeof(struct aac_write) + sizeof(struct sgentry))
 			/ sizeof(struct sgentry);
 	dev->comm_interface = AAC_COMM_PRODUCER;
-	dev->raw_io_64 = 0;
+	dev->raw_io_interface = dev->raw_io_64 = 0;
+
 	if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
 		0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
 	 		(status[0] == 0x00000001)) {
 		if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64))
 			dev->raw_io_64 = 1;
-		if (dev->a_ops.adapter_comm &&
-		    (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM)))
-			dev->comm_interface = AAC_COMM_MESSAGE;
+		if (dev->a_ops.adapter_comm) {
+			if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1)) {
+				dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
+				dev->raw_io_interface = 1;
+			} else if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM)) {
+				dev->comm_interface = AAC_COMM_MESSAGE;
+				dev->raw_io_interface = 1;
+			}
+		}
 		if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
 		    (status[2] > dev->base_size)) {
 			aac_adapter_ioremap(dev, 0);
@@ -350,10 +380,12 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 		 *	status[3] & 0xFFFF	maximum number FIBs outstanding
 		 */
 		host->max_sectors = (status[1] >> 16) << 1;
-		dev->max_fib_size = status[1] & 0xFFFF;
+		/* Multiple of 32 for PMC */
+		dev->max_fib_size = status[1] & 0xFFE0;
 		host->sg_tablesize = status[2] >> 16;
 		dev->sg_tablesize = status[2] & 0xFFFF;
 		host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
+		dev->max_num_aif = status[4] & 0xFFFF;
 		/*
 		 *	NOTE:
 		 *	All these overrides are based on a fixed internal
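With AAC_COMM_MESSAGE_TYPE1 the comm region now carves a host RRQ (one 32-bit response slot per possible outstanding FIB) out of the space between the fixed 4 KB fib area and the aac_init struct. A sketch of the offset arithmetic from aac_alloc_comm(), using a hypothetical can_queue of 512 and assuming AAC_NUM_MGT_FIB is 8 (check aacraid.h for the real value):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned long fibsize = 4096;	/* fixed fib area, as in aac_alloc_comm() */
	unsigned long can_queue = 512;		/* hypothetical host queue depth */
	unsigned long num_mgt_fib = 8;		/* assumed value of AAC_NUM_MGT_FIB */
	/* one 32-bit response slot per possible outstanding FIB */
	unsigned long host_rrq_size = (can_queue + num_mgt_fib) * sizeof(uint32_t);

	printf("host_rrq at base + %lu (%lu bytes)\n", fibsize, host_rrq_size);
	printf("aac_init at base + %lu\n", fibsize + host_rrq_size);
	return 0;
}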
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 060ac4bd5a14..dd7ad3ba2dad 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -5,7 +5,8 @@
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -63,9 +64,11 @@ static int fib_map_alloc(struct aac_dev *dev)
 	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
 	  dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
 	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
-	if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
-	  * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
-	  &dev->hw_fib_pa))==NULL)
+	dev->hw_fib_va = pci_alloc_consistent(dev->pdev,
+		(dev->max_fib_size + sizeof(struct aac_fib_xporthdr))
+		* (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
+		&dev->hw_fib_pa);
+	if (dev->hw_fib_va == NULL)
 		return -ENOMEM;
 	return 0;
 }
@@ -110,9 +113,22 @@ int aac_fib_setup(struct aac_dev * dev)
 	if (i<0)
 		return -ENOMEM;
 
+	/* 32 byte alignment for PMC */
+	hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
+	dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
+		(hw_fib_pa - dev->hw_fib_pa));
+	dev->hw_fib_pa = hw_fib_pa;
+	memset(dev->hw_fib_va, 0,
+		(dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) *
+		(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
+
+	/* add Xport header */
+	dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
+		sizeof(struct aac_fib_xporthdr));
+	dev->hw_fib_pa += sizeof(struct aac_fib_xporthdr);
+
 	hw_fib = dev->hw_fib_va;
 	hw_fib_pa = dev->hw_fib_pa;
-	memset(hw_fib, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
 	/*
 	 *	Initialise the fibs
 	 */
@@ -129,8 +145,10 @@ int aac_fib_setup(struct aac_dev * dev)
 		hw_fib->header.XferState = cpu_to_le32(0xffffffff);
 		hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
 		fibptr->hw_fib_pa = hw_fib_pa;
-		hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + dev->max_fib_size);
-		hw_fib_pa = hw_fib_pa + dev->max_fib_size;
+		hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
+			dev->max_fib_size + sizeof(struct aac_fib_xporthdr));
+		hw_fib_pa = hw_fib_pa +
+			dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
 	}
 	/*
 	 *	Add the fib chain to the free list
@@ -664,9 +682,14 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
 	unsigned long nointr = 0;
 	unsigned long qflags;
 
+	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
+		kfree(hw_fib);
+		return 0;
+	}
+
 	if (hw_fib->header.XferState == 0) {
 		if (dev->comm_interface == AAC_COMM_MESSAGE)
-			kfree (hw_fib);
+			kfree(hw_fib);
 		return 0;
 	}
 	/*
@@ -674,7 +697,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
 	 */
 	if (hw_fib->header.StructType != FIB_MAGIC) {
 		if (dev->comm_interface == AAC_COMM_MESSAGE)
-			kfree (hw_fib);
+			kfree(hw_fib);
 		return -EINVAL;
 	}
 	/*
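Each hardware FIB in the pool now carries a transport header in front of it, so aac_fib_setup() both realigns the pool start and widens the per-FIB stride. A sketch of the resulting pointer walk, assuming a 512-byte max_fib_size and the 32-byte xport header:

#include <stdio.h>
#include <stdint.h>

#define ALIGN32		32
#define XPORTHDR_SIZE	32	/* sizeof(struct aac_fib_xporthdr) */

int main(void)
{
	uint64_t pool_pa = 0x20000010ULL;	/* arbitrary pci_alloc_consistent result */
	unsigned int max_fib_size = 512;	/* hypothetical */
	unsigned int i;

	/* 32-byte alignment, then skip the first transport header */
	uint64_t pa = ((pool_pa + (ALIGN32 - 1)) & ~(uint64_t)(ALIGN32 - 1))
			+ XPORTHDR_SIZE;

	for (i = 0; i < 3; i++) {
		printf("fib %u at 0x%llx\n", i, (unsigned long long)pa);
		/* stride now includes the per-FIB transport header */
		pa += max_fib_size + XPORTHDR_SIZE;
	}
	return 0;
}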
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index 9c7408fe8c7d..f0c66a80ad13 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -5,7 +5,8 @@
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -228,6 +229,48 @@ unsigned int aac_command_normal(struct aac_queue *q)
 	return 0;
 }
 
+/**
+ *	aac_aif_callback
+ *	@context: the context set in the fib - here it is the fib itself
+ *	@fibptr: pointer to the fib
+ *
+ *	Handles the AIFs - new method (SRC)
+ */
+
+static void aac_aif_callback(void *context, struct fib *fibptr)
+{
+	struct fib *fibctx;
+	struct aac_dev *dev;
+	struct aac_aifcmd *cmd;
+	int status;
+
+	fibctx = (struct fib *)context;
+	BUG_ON(fibptr == NULL);
+	dev = fibptr->dev;
+
+	if (fibptr->hw_fib_va->header.XferState &
+	    cpu_to_le32(NoMoreAifDataAvailable)) {
+		aac_fib_complete(fibptr);
+		aac_fib_free(fibptr);
+		return;
+	}
+
+	aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va);
+
+	aac_fib_init(fibctx);
+	cmd = (struct aac_aifcmd *) fib_data(fibctx);
+	cmd->command = cpu_to_le32(AifReqEvent);
+
+	status = aac_fib_send(AifRequest,
+		fibctx,
+		sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
+		FsaNormal,
+		0, 1,
+		(fib_callback)aac_aif_callback, fibctx);
+}
+
 
 /**
  *	aac_intr_normal	-	Handle command replies
@@ -238,19 +281,17 @@ unsigned int aac_command_normal(struct aac_queue *q)
  *	know there is a response on our normal priority queue. We will pull off
  *	all QE there are and wake up all the waiters before exiting.
  */
-
-unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
+unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
+			int isAif, int isFastResponse, struct hw_fib *aif_fib)
 {
 	unsigned long mflags;
 	dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
-	if ((index & 0x00000002L)) {
+	if (isAif == 1) {	/* AIF - common */
 		struct hw_fib * hw_fib;
 		struct fib * fib;
 		struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
 		unsigned long flags;
 
-		if (index == 0xFFFFFFFEL) /* Special Case */
-			return 0;	  /* Do nothing */
 		/*
 		 *	Allocate a FIB. For non queued stuff we can just use
 		 * the stack so we are happy. We need a fib object in order to
@@ -263,8 +304,13 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
 			kfree (fib);
 			return 1;
 		}
-		memcpy(hw_fib, (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) +
-		  (index & ~0x00000002L)), sizeof(struct hw_fib));
+		if (aif_fib != NULL) {
+			memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
+		} else {
+			memcpy(hw_fib,
+				(struct hw_fib *)(((uintptr_t)(dev->regs.sa)) +
+				index), sizeof(struct hw_fib));
+		}
 		INIT_LIST_HEAD(&fib->fiblink);
 		fib->type = FSAFS_NTC_FIB_CONTEXT;
 		fib->size = sizeof(struct fib);
@@ -277,9 +323,26 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
 	        wake_up_interruptible(&q->cmdready);
 		spin_unlock_irqrestore(q->lock, flags);
 		return 1;
+	} else if (isAif == 2) {	/* AIF - new (SRC) */
+		struct fib *fibctx;
+		struct aac_aifcmd *cmd;
+
+		fibctx = aac_fib_alloc(dev);
+		if (!fibctx)
+			return 1;
+		aac_fib_init(fibctx);
+
+		cmd = (struct aac_aifcmd *) fib_data(fibctx);
+		cmd->command = cpu_to_le32(AifReqEvent);
+
+		return aac_fib_send(AifRequest,
+			fibctx,
+			sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
+			FsaNormal,
+			0, 1,
+			(fib_callback)aac_aif_callback, fibctx);
 	} else {
-		int fast = index & 0x01;
-		struct fib * fib = &dev->fibs[index >> 2];
+		struct fib *fib = &dev->fibs[index];
 		struct hw_fib * hwfib = fib->hw_fib_va;
 
 		/*
@@ -298,7 +361,7 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
 			return 0;
 		}
 
-		if (fast) {
+		if (isFastResponse) {
 			/*
 			 *	Doctor the fib
 			 */
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 2c93d9496d62..4ff26521d75f 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -5,7 +5,8 @@
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -54,7 +55,7 @@
 
 #include "aacraid.h"
 
-#define AAC_DRIVER_VERSION		"1.1-5"
+#define AAC_DRIVER_VERSION		"1.1-7"
 #ifndef AAC_DRIVER_BRANCH
 #define AAC_DRIVER_BRANCH		""
 #endif
@@ -161,6 +162,7 @@ static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
 	{ 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
 	{ 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
 	{ 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */
+	{ 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Catch All */
 	{ 0,}
 };
 MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
@@ -235,7 +237,8 @@ static struct aac_driver_ident aac_drivers[] = {
 	{ aac_rx_init, "aacraid",  "Legend  ", "RAID            ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */
 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "RAID            ", 2 }, /* Adaptec Catch All */
 	{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec Rocket Catch All */
-	{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID            ", 2 } /* Adaptec NEMER/ARK Catch All */
+	{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID           ", 2 }, /* Adaptec NEMER/ARK Catch All */
+	{ aac_src_init, "aacraid", "ADAPTEC ", "RAID            ", 2 } /* Adaptec PMC Catch All */
 };
 
 /**
@@ -653,8 +656,10 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
 	 * This adapter needs a blind reset, only do so for Adapters that
 	 * support a register, instead of a commanded, reset.
 	 */
-	if ((aac->supplement_adapter_info.SupportedOptions2 &
-	   AAC_OPTION_MU_RESET) &&
+	if (((aac->supplement_adapter_info.SupportedOptions2 &
+	  AAC_OPTION_MU_RESET) ||
+	  (aac->supplement_adapter_info.SupportedOptions2 &
+	  AAC_OPTION_DOORBELL_RESET)) &&
 	  aac_check_reset &&
 	  ((aac_check_reset != 1) ||
 	   !(aac->supplement_adapter_info.SupportedOptions2 &
diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c
index c55f7c862f0e..f397d21a0c06 100644
--- a/drivers/scsi/aacraid/nark.c
+++ b/drivers/scsi/aacraid/nark.c
@@ -4,7 +4,8 @@
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2006-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index 16d8db550027..be44de92429a 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -5,7 +5,8 @@
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 84d77fd86e5b..ce530f113fdb 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -5,7 +5,8 @@
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -84,15 +85,35 @@ static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id)
 
 static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
 {
+	int isAif, isFastResponse, isSpecial;
 	struct aac_dev *dev = dev_id;
 	u32 Index = rx_readl(dev, MUnit.OutboundQueue);
 	if (unlikely(Index == 0xFFFFFFFFL))
 		Index = rx_readl(dev, MUnit.OutboundQueue);
 	if (likely(Index != 0xFFFFFFFFL)) {
 		do {
-			if (unlikely(aac_intr_normal(dev, Index))) {
-				rx_writel(dev, MUnit.OutboundQueue, Index);
-				rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
+			isAif = isFastResponse = isSpecial = 0;
+			if (Index & 0x00000002L) {
+				isAif = 1;
+				if (Index == 0xFFFFFFFEL)
+					isSpecial = 1;
+				Index &= ~0x00000002L;
+			} else {
+				if (Index & 0x00000001L)
+					isFastResponse = 1;
+				Index >>= 2;
+			}
+			if (!isSpecial) {
+				if (unlikely(aac_intr_normal(dev,
+						Index, isAif,
+						isFastResponse, NULL))) {
+					rx_writel(dev,
+						MUnit.OutboundQueue,
+						Index);
+					rx_writel(dev,
+						MUnit.ODR,
+						DoorBellAdapterNormRespReady);
+				}
 			}
 			Index = rx_readl(dev, MUnit.OutboundQueue);
 		} while (Index != 0xFFFFFFFFL);
@@ -631,6 +652,10 @@ int _aac_rx_init(struct aac_dev *dev)
 			name, instance);
 		goto error_iounmap;
 	}
+	dev->dbg_base = dev->scsi_host_ptr->base;
+	dev->dbg_base_mapped = dev->base;
+	dev->dbg_size = dev->base_size;
+
 	aac_adapter_enable_int(dev);
 	/*
 	 *	Tell the adapter that all is configured, and it can
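The Rx message-mode ISR above now decodes the OutboundQueue word itself rather than leaving that to aac_intr_normal(): bit 1 marks an AIF (with 0xFFFFFFFE as the do-nothing special case), otherwise bit 0 marks a fast response and the remaining bits, shifted down by two, index dev->fibs. A standalone sketch of that decode with a few example words:

#include <stdio.h>
#include <stdint.h>

static void decode(uint32_t Index)
{
	int isAif = 0, isFastResponse = 0, isSpecial = 0;

	if (Index & 0x00000002UL) {		/* AIF */
		isAif = 1;
		if (Index == 0xFFFFFFFEUL)	/* special case: do nothing */
			isSpecial = 1;
		Index &= ~0x00000002UL;
	} else {
		if (Index & 0x00000001UL)	/* fast response */
			isFastResponse = 1;
		Index >>= 2;			/* index into dev->fibs[] */
	}
	printf("aif=%d special=%d fast=%d index=%u\n",
	       isAif, isSpecial, isFastResponse, Index);
}

int main(void)
{
	decode(0xFFFFFFFE);	/* special AIF */
	decode(0x00000012);	/* AIF (low bits were a copy offset on older boards) */
	decode(0x00000029);	/* fast response for dev->fibs[10] */
	decode(0x00000028);	/* normal response for dev->fibs[10] */
	return 0;
}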
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 622c21c68e65..e5d4457121ea 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -5,7 +5,8 @@
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -391,6 +392,10 @@ int aac_sa_init(struct aac_dev *dev)
 			name, instance);
 		goto error_iounmap;
 	}
+	dev->dbg_base = dev->scsi_host_ptr->base;
+	dev->dbg_base_mapped = dev->base;
+	dev->dbg_size = dev->base_size;
+
 	aac_adapter_enable_int(dev);
 
 	/*
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
new file mode 100644
index 000000000000..c20494660603
--- /dev/null
+++ b/drivers/scsi/aacraid/src.c
@@ -0,0 +1,594 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  src.c
+ *
+ * Abstract: Hardware Device Interface for PMC SRC based controllers
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+#include <linux/completion.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
+{
+	struct aac_dev *dev = dev_id;
+	unsigned long bellbits, bellbits_shifted;
+	int our_interrupt = 0;
+	int isFastResponse;
+	u32 index, handle;
+
+	bellbits = src_readl(dev, MUnit.ODR_R);
+	if (bellbits & PmDoorBellResponseSent) {
+		bellbits = PmDoorBellResponseSent;
+		/* handle async. status */
+		our_interrupt = 1;
+		index = dev->host_rrq_idx;
+		if (dev->host_rrq[index] == 0) {
+			u32 old_index = index;
+			/* adjust index */
+			do {
+				index++;
+				if (index == dev->scsi_host_ptr->can_queue +
+							AAC_NUM_MGT_FIB)
+					index = 0;
+				if (dev->host_rrq[index] != 0)
+					break;
+			} while (index != old_index);
+			dev->host_rrq_idx = index;
+		}
+		for (;;) {
+			isFastResponse = 0;
+			/* remove toggle bit (31) */
+			handle = (dev->host_rrq[index] & 0x7fffffff);
+			/* check fast response bit (30) */
+			if (handle & 0x40000000)
+				isFastResponse = 1;
+			handle &= 0x0000ffff;
+			if (handle == 0)
+				break;
+
+			aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
+
+			dev->host_rrq[index++] = 0;
+			if (index == dev->scsi_host_ptr->can_queue +
+						AAC_NUM_MGT_FIB)
+				index = 0;
+			dev->host_rrq_idx = index;
+		}
+	} else {
+		bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
+		if (bellbits_shifted & DoorBellAifPending) {
+			our_interrupt = 1;
+			/* handle AIF */
+			aac_intr_normal(dev, 0, 2, 0, NULL);
+		}
+	}
+
+	if (our_interrupt) {
+		src_writel(dev, MUnit.ODR_C, bellbits);
+		return IRQ_HANDLED;
+	}
+	return IRQ_NONE;
+}
+
+/**
+ *	aac_src_disable_interrupt	-	Disable interrupts
+ *	@dev: Adapter
+ */
+
+static void aac_src_disable_interrupt(struct aac_dev *dev)
+{
+	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
+}
+
+/**
+ *	aac_src_enable_interrupt_message	-	Enable interrupts
+ *	@dev: Adapter
+ */
+
+static void aac_src_enable_interrupt_message(struct aac_dev *dev)
+{
+	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xfffffff8);
+}
+
+/**
+ *	src_sync_cmd	-	send a command and wait
+ *	@dev: Adapter
+ *	@command: Command to execute
+ *	@p1: first parameter
+ *	@status: adapter status
+ *
+ *	This routine will send a synchronous command to the adapter and wait
+ *	for its completion.
+ */
+
+static int src_sync_cmd(struct aac_dev *dev, u32 command,
+	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
+	u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4)
+{
+	unsigned long start;
+	int ok;
+
+	/*
+	 *	Write the command into Mailbox 0
+	 */
+	writel(command, &dev->IndexRegs->Mailbox[0]);
+	/*
+	 *	Write the parameters into Mailboxes 1 - 4
+	 */
+	writel(p1, &dev->IndexRegs->Mailbox[1]);
+	writel(p2, &dev->IndexRegs->Mailbox[2]);
+	writel(p3, &dev->IndexRegs->Mailbox[3]);
+	writel(p4, &dev->IndexRegs->Mailbox[4]);
+
+	/*
+	 *	Clear the synch command doorbell to start on a clean slate.
+	 */
+	src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+
+	/*
+	 *	Disable doorbell interrupts
+	 */
+	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
+
+	/*
+	 *	Force the completion of the mask register write before issuing
+	 *	the interrupt.
+	 */
+	src_readl(dev, MUnit.OIMR);
+
+	/*
+	 *	Signal that there is a new synch command
+	 */
+	src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);
+
+	ok = 0;
+	start = jiffies;
+
+	/*
+	 *	Wait up to 30 seconds
+	 */
+	while (time_before(jiffies, start+30*HZ)) {
+		/* Delay 5 microseconds to let Mon960 get info. */
+		udelay(5);
+
+		/* Mon960 will set doorbell0 bit
+		 * when it has completed the command
+		 */
+		if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & OUTBOUNDDOORBELL_0) {
+			/* Clear the doorbell */
+			src_writel(dev,
+				MUnit.ODR_C,
+				OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+			ok = 1;
+			break;
+		}
+
+		 /* Yield the processor in case we are slow */
+		msleep(1);
+	}
+	if (unlikely(ok != 1)) {
+		 /* Restore interrupt mask even though we timed out */
+		aac_adapter_enable_int(dev);
+		return -ETIMEDOUT;
+	}
+
+	 /* Pull the synch status from Mailbox 0 */
+	if (status)
+		*status = readl(&dev->IndexRegs->Mailbox[0]);
+	if (r1)
+		*r1 = readl(&dev->IndexRegs->Mailbox[1]);
+	if (r2)
+		*r2 = readl(&dev->IndexRegs->Mailbox[2]);
+	if (r3)
+		*r3 = readl(&dev->IndexRegs->Mailbox[3]);
+	if (r4)
+		*r4 = readl(&dev->IndexRegs->Mailbox[4]);
+
+	 /* Clear the synch command doorbell */
+	src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+
+	 /* Restore interrupt mask */
+	aac_adapter_enable_int(dev);
+	return 0;
+
+}
+
+/**
+ *	aac_src_interrupt_adapter	-	interrupt adapter
+ *	@dev: Adapter
+ *
+ *	Send an interrupt to the i960 and breakpoint it.
+ */
+
+static void aac_src_interrupt_adapter(struct aac_dev *dev)
+{
+	src_sync_cmd(dev, BREAKPOINT_REQUEST,
+		0, 0, 0, 0, 0, 0,
+		NULL, NULL, NULL, NULL, NULL);
+}
+
+/**
+ *	aac_src_notify_adapter		-	send an event to the adapter
+ *	@dev: Adapter
+ *	@event: Event to send
+ *
+ *	Notify the i960 that something it probably cares about has
+ *	happened.
+ */
+
+static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
+{
+	switch (event) {
+
+	case AdapNormCmdQue:
+		src_writel(dev, MUnit.ODR_C,
+			INBOUNDDOORBELL_1 << SRC_ODR_SHIFT);
+		break;
+	case HostNormRespNotFull:
+		src_writel(dev, MUnit.ODR_C,
+			INBOUNDDOORBELL_4 << SRC_ODR_SHIFT);
+		break;
+	case AdapNormRespQue:
+		src_writel(dev, MUnit.ODR_C,
+			INBOUNDDOORBELL_2 << SRC_ODR_SHIFT);
+		break;
+	case HostNormCmdNotFull:
+		src_writel(dev, MUnit.ODR_C,
+			INBOUNDDOORBELL_3 << SRC_ODR_SHIFT);
+		break;
+	case FastIo:
+		src_writel(dev, MUnit.ODR_C,
+			INBOUNDDOORBELL_6 << SRC_ODR_SHIFT);
+		break;
+	case AdapPrintfDone:
+		src_writel(dev, MUnit.ODR_C,
+			INBOUNDDOORBELL_5 << SRC_ODR_SHIFT);
+		break;
+	default:
+		BUG();
+		break;
+	}
+}
+
+/**
+ *	aac_src_start_adapter		-	activate adapter
+ *	@dev:	Adapter
+ *
+ *	Start up processing on an i960 based AAC adapter
+ */
+
+static void aac_src_start_adapter(struct aac_dev *dev)
+{
+	struct aac_init *init;
+
+	init = dev->init;
+	init->HostElapsedSeconds = cpu_to_le32(get_seconds());
+
+	/* We can only use a 32 bit address here */
+	src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
+	  0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
+}
+
+/**
+ *	aac_src_check_health
+ *	@dev: device to check if healthy
+ *
+ *	Will attempt to determine if the specified adapter is alive and
+ *	capable of handling requests, returning 0 if alive.
+ */
+static int aac_src_check_health(struct aac_dev *dev)
+{
+	u32 status = src_readl(dev, MUnit.OMR);
+
+	/*
+	 *	Check to see if the board failed any self tests.
+	 */
+	if (unlikely(status & SELF_TEST_FAILED))
+		return -1;
+
+	/*
+	 *	Check to see if the board panic'd.
+	 */
+	if (unlikely(status & KERNEL_PANIC))
+		return (status >> 16) & 0xFF;
+	/*
+	 *	Wait for the adapter to be up and running.
+	 */
+	if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
+		return -3;
+	/*
+	 *	Everything is OK
+	 */
+	return 0;
+}
+
+/**
+ *	aac_src_deliver_message
+ *	@fib: fib to issue
+ *
+ *	Will send a fib, returning 0 if successful.
+ */
+static int aac_src_deliver_message(struct fib *fib)
+{
+	struct aac_dev *dev = fib->dev;
+	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
+	unsigned long qflags;
+	u32 fibsize;
+	u64 address;
+	struct aac_fib_xporthdr *pFibX;
+
+	spin_lock_irqsave(q->lock, qflags);
+	q->numpending++;
+	spin_unlock_irqrestore(q->lock, qflags);
+
+	/* Calculate the encoded fib size: 128-byte units, minus one */
+	fibsize = (sizeof(struct aac_fib_xporthdr) +
+		fib->hw_fib_va->header.Size + 127) / 128 - 1;
+	if (fibsize > (ALIGN32 - 1))
+		fibsize = ALIGN32 - 1;
+
+	/* Fill XPORT header */
+	pFibX = (struct aac_fib_xporthdr *)
+		((unsigned char *)fib->hw_fib_va -
+		sizeof(struct aac_fib_xporthdr));
+	pFibX->Handle = fib->hw_fib_va->header.SenderData + 1;
+	pFibX->HostAddress = fib->hw_fib_pa;
+	pFibX->Size = fib->hw_fib_va->header.Size;
+	address = fib->hw_fib_pa - (u64)sizeof(struct aac_fib_xporthdr);
+
+	src_writel(dev, MUnit.IQ_H, (u32)(address >> 32));
+	src_writel(dev, MUnit.IQ_L, (u32)(address & 0xffffffff) + fibsize);
+	return 0;
+}
+
+/**
+ *	aac_src_ioremap
+ *	@size: mapping resize request
+ *
+ */
+static int aac_src_ioremap(struct aac_dev *dev, u32 size)
+{
+	if (!size) {
+		iounmap(dev->regs.src.bar0);
+		dev->regs.src.bar0 = NULL;
+		iounmap(dev->base);
+		dev->base = NULL;
+		return 0;
+	}
+	dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
+		AAC_MIN_SRC_BAR1_SIZE);
+	dev->base = NULL;
+	if (dev->regs.src.bar1 == NULL)
+		return -1;
+	dev->base = dev->regs.src.bar0 = ioremap(dev->scsi_host_ptr->base,
+				size);
+	if (dev->base == NULL) {
+		iounmap(dev->regs.src.bar1);
+		dev->regs.src.bar1 = NULL;
+		return -1;
+	}
+	dev->IndexRegs = &((struct src_registers __iomem *)
+		dev->base)->IndexRegs;
+	return 0;
+}
+
+static int aac_src_restart_adapter(struct aac_dev *dev, int bled)
+{
+	u32 var, reset_mask;
+
+	if (bled >= 0) {
+		if (bled)
+			printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
+				dev->name, dev->id, bled);
+		bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
+			0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL);
+		if (bled || (var != 0x00000001))
+			bled = -EINVAL;
+		if (dev->supplement_adapter_info.SupportedOptions2 &
+			AAC_OPTION_DOORBELL_RESET) {
+			src_writel(dev, MUnit.IDR, reset_mask);
+			msleep(5000); /* Delay 5 seconds */
+		}
+	}
+
+	if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
+		return -ENODEV;
+
+	if (startup_timeout < 300)
+		startup_timeout = 300;
+
+	return 0;
+}
+
+/**
+ *	aac_src_select_comm	-	Select communications method
+ *	@dev: Adapter
+ *	@comm: communications method
+ */
+int aac_src_select_comm(struct aac_dev *dev, int comm)
+{
+	switch (comm) {
+	case AAC_COMM_MESSAGE:
+		dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
+		dev->a_ops.adapter_intr = aac_src_intr_message;
+		dev->a_ops.adapter_deliver = aac_src_deliver_message;
+		break;
+	default:
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ *  aac_src_init	-	initialize a Cardinal Frey Bar card
+ *  @dev: device to configure
+ *
+ */
+
+int aac_src_init(struct aac_dev *dev)
+{
+	unsigned long start;
+	unsigned long status;
+	int restart = 0;
+	int instance = dev->id;
+	const char *name = dev->name;
+
+	dev->a_ops.adapter_ioremap = aac_src_ioremap;
+	dev->a_ops.adapter_comm = aac_src_select_comm;
+
+	dev->base_size = AAC_MIN_SRC_BAR0_SIZE;
+	if (aac_adapter_ioremap(dev, dev->base_size)) {
+		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
+		goto error_iounmap;
+	}
+
+	/* Failure to reset here is an option ... */
+	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
+	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
+	if ((aac_reset_devices || reset_devices) &&
+		!aac_src_restart_adapter(dev, 0))
+		++restart;
+	/*
+	 *	Check to see if the board panic'd while booting.
+	 */
+	status = src_readl(dev, MUnit.OMR);
+	if (status & KERNEL_PANIC) {
+		if (aac_src_restart_adapter(dev, aac_src_check_health(dev)))
+			goto error_iounmap;
+		++restart;
+	}
+	/*
+	 *	Check to see if the board failed any self tests.
+	 */
+	status = src_readl(dev, MUnit.OMR);
+	if (status & SELF_TEST_FAILED) {
+		printk(KERN_ERR "%s%d: adapter self-test failed.\n",
+			dev->name, instance);
+		goto error_iounmap;
+	}
+	/*
+	 *	Check to see if the monitor panic'd while booting.
+	 */
+	if (status & MONITOR_PANIC) {
+		printk(KERN_ERR "%s%d: adapter monitor panic.\n",
+			dev->name, instance);
+		goto error_iounmap;
+	}
+	start = jiffies;
+	/*
+	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
+	 */
+	while (!((status = src_readl(dev, MUnit.OMR)) &
+		KERNEL_UP_AND_RUNNING)) {
+		if ((restart &&
+		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
+		  time_after(jiffies, start+HZ*startup_timeout)) {
+			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
+					dev->name, instance, status);
+			goto error_iounmap;
+		}
+		if (!restart &&
+		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
+		  time_after(jiffies, start + HZ *
+		  ((startup_timeout > 60)
+		    ? (startup_timeout - 60)
+		    : (startup_timeout / 2))))) {
+			if (likely(!aac_src_restart_adapter(dev,
+			    aac_src_check_health(dev))))
+				start = jiffies;
+			++restart;
+		}
+		msleep(1);
+	}
+	if (restart && aac_commit)
+		aac_commit = 1;
+	/*
+	 *	Fill in the common function dispatch table.
+	 */
+	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
+	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
+	dev->a_ops.adapter_notify = aac_src_notify_adapter;
+	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
+	dev->a_ops.adapter_check_health = aac_src_check_health;
+	dev->a_ops.adapter_restart = aac_src_restart_adapter;
+
+	/*
+	 *	First clear out all interrupts.  Then enable the one's that we
+	 *	can handle.
+	 */
+	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
+	aac_adapter_disable_int(dev);
+	src_writel(dev, MUnit.ODR_C, 0xffffffff);
+	aac_adapter_enable_int(dev);
+
+	if (aac_init_adapter(dev) == NULL)
+		goto error_iounmap;
+	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
+		goto error_iounmap;
+
+	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
+
+	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
+			IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
+
+		if (dev->msi)
+			pci_disable_msi(dev->pdev);
+
+		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
+			name, instance);
+		goto error_iounmap;
+	}
+	dev->dbg_base = pci_resource_start(dev->pdev, 2);
+	dev->dbg_base_mapped = dev->regs.src.bar1;
+	dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
+
+	aac_adapter_enable_int(dev);
+	/*
+	 *	Tell the adapter that all is configured, and it can
+	 * start accepting requests
+	 */
+	aac_src_start_adapter(dev);
+
+	return 0;
+
+error_iounmap:
+
+	return -1;
+}
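aac_src_deliver_message() above encodes the FIB size into the low bits of the inbound-queue address: the size is expressed in 128-byte units minus one (capped so it fits inside the 32-byte alignment slack), and the address handed to the adapter points at the transport header, 32 bytes below hw_fib_pa. A sketch of the register-value computation for a hypothetical FIB:

#include <stdio.h>
#include <stdint.h>

#define ALIGN32		32
#define XPORTHDR_SIZE	32	/* sizeof(struct aac_fib_xporthdr) */

int main(void)
{
	uint64_t hw_fib_pa = 0x20000020ULL;	/* hypothetical, 32-byte aligned */
	uint32_t header_size = 488;		/* hypothetical hw_fib header.Size */

	/* size in 128-byte units, minus one, capped at ALIGN32 - 1 */
	uint32_t fibsize = (XPORTHDR_SIZE + header_size + 127) / 128 - 1;
	if (fibsize > (ALIGN32 - 1))
		fibsize = ALIGN32 - 1;

	/* the adapter is handed the transport header, not the FIB itself */
	uint64_t address = hw_fib_pa - XPORTHDR_SIZE;

	printf("IQ_H = 0x%08x\n", (uint32_t)(address >> 32));
	printf("IQ_L = 0x%08x\n", (uint32_t)(address & 0xffffffff) + fibsize);
	return 0;
}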
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index df2fc09ba479..b6d350ac4288 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -62,7 +62,7 @@
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME		"bnx2fc"
-#define BNX2FC_VERSION		"1.0.0"
+#define BNX2FC_VERSION		"1.0.1"
 
 #define PFX			"bnx2fc: "
 
@@ -84,9 +84,15 @@
 #define BNX2FC_NUM_MAX_SESS	128
 #define BNX2FC_NUM_MAX_SESS_LOG	(ilog2(BNX2FC_NUM_MAX_SESS))
 
-#define BNX2FC_MAX_OUTSTANDING_CMNDS	4096
+#define BNX2FC_MAX_OUTSTANDING_CMNDS	2048
+#define BNX2FC_CAN_QUEUE		BNX2FC_MAX_OUTSTANDING_CMNDS
+#define BNX2FC_ELSTM_XIDS		BNX2FC_CAN_QUEUE
 #define BNX2FC_MIN_PAYLOAD		256
 #define BNX2FC_MAX_PAYLOAD		2048
+#define BNX2FC_MFS			\
+			(BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header))
+#define BNX2FC_MINI_JUMBO_MTU		2500
+
 
 #define BNX2FC_RQ_BUF_SZ		256
 #define BNX2FC_RQ_BUF_LOG_SZ		(ilog2(BNX2FC_RQ_BUF_SZ))
@@ -98,7 +104,8 @@
 #define BNX2FC_CONFQ_WQE_SIZE		(sizeof(struct fcoe_confqe))
 #define BNX2FC_5771X_DB_PAGE_SIZE	128
 
-#define BNX2FC_MAX_TASKS		BNX2FC_MAX_OUTSTANDING_CMNDS
+#define BNX2FC_MAX_TASKS		\
+			     (BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS)
 #define BNX2FC_TASK_SIZE		128
 #define	BNX2FC_TASKS_PER_PAGE		(PAGE_SIZE/BNX2FC_TASK_SIZE)
 #define BNX2FC_TASK_CTX_ARR_SZ		(BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE)
@@ -112,10 +119,10 @@
 #define BNX2FC_WRITE			(1 << 0)
 
 #define BNX2FC_MIN_XID			0
-#define BNX2FC_MAX_XID			(BNX2FC_MAX_OUTSTANDING_CMNDS - 1)
-#define FCOE_MIN_XID			(BNX2FC_MAX_OUTSTANDING_CMNDS)
-#define FCOE_MAX_XID		\
-			(BNX2FC_MAX_OUTSTANDING_CMNDS + (nr_cpu_ids * 256))
+#define BNX2FC_MAX_XID			\
+			(BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS - 1)
+#define FCOE_MIN_XID			(BNX2FC_MAX_XID + 1)
+#define FCOE_MAX_XID			(FCOE_MIN_XID + 4095)
 #define BNX2FC_MAX_LUN			0xFFFF
 #define BNX2FC_MAX_FCP_TGT		256
 #define BNX2FC_MAX_CMD_LEN		16
@@ -125,7 +132,6 @@
 
 #define BNX2FC_WAIT_CNT			120
 #define BNX2FC_FW_TIMEOUT		(3 * HZ)
-
 #define PORT_MAX			2
 
 #define CMD_SCSI_STATUS(Cmnd)		((Cmnd)->SCp.Status)
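The XID map is now static: 2048 SCSI commands plus 2048 ELS/task-management exchanges go to the offload path, and the libfc-managed range starts immediately above it instead of scaling with nr_cpu_ids. A sketch that reproduces the macro arithmetic and checks the two ranges stay contiguous:

#include <assert.h>
#include <stdio.h>

#define BNX2FC_MAX_OUTSTANDING_CMNDS	2048
#define BNX2FC_CAN_QUEUE		BNX2FC_MAX_OUTSTANDING_CMNDS
#define BNX2FC_ELSTM_XIDS		BNX2FC_CAN_QUEUE
#define BNX2FC_MIN_XID			0
#define BNX2FC_MAX_XID			\
			(BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS - 1)
#define FCOE_MIN_XID			(BNX2FC_MAX_XID + 1)
#define FCOE_MAX_XID			(FCOE_MIN_XID + 4095)

int main(void)
{
	/* offload XIDs 0..4095, libfc XIDs 4096..8191 */
	printf("bnx2fc XIDs: %d..%d\n", BNX2FC_MIN_XID, BNX2FC_MAX_XID);
	printf("libfc  XIDs: %d..%d\n", FCOE_MIN_XID, FCOE_MAX_XID);
	assert(FCOE_MIN_XID == BNX2FC_MAX_XID + 1);	/* no gap, no overlap */
	return 0;
}

Keeping the ranges contiguous means a single comparison against FCOE_MIN_XID is enough to route an exchange to the offload or the libfc path.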
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index e476e8753079..e2e647509a73 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -21,7 +21,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
 
 #define DRV_MODULE_NAME		"bnx2fc"
 #define DRV_MODULE_VERSION	BNX2FC_VERSION
-#define DRV_MODULE_RELDATE	"Jan 25, 2011"
+#define DRV_MODULE_RELDATE	"Mar 17, 2011"
 
 
 static char version[] __devinitdata =
@@ -437,17 +437,16 @@ static int bnx2fc_l2_rcv_thread(void *arg)
 	set_current_state(TASK_INTERRUPTIBLE);
 	while (!kthread_should_stop()) {
 		schedule();
-		set_current_state(TASK_RUNNING);
 		spin_lock_bh(&bg->fcoe_rx_list.lock);
 		while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) {
 			spin_unlock_bh(&bg->fcoe_rx_list.lock);
 			bnx2fc_recv_frame(skb);
 			spin_lock_bh(&bg->fcoe_rx_list.lock);
 		}
+		__set_current_state(TASK_INTERRUPTIBLE);
 		spin_unlock_bh(&bg->fcoe_rx_list.lock);
-		set_current_state(TASK_INTERRUPTIBLE);
 	}
-	set_current_state(TASK_RUNNING);
+	__set_current_state(TASK_RUNNING);
 	return 0;
 }
 
@@ -569,7 +568,6 @@ int bnx2fc_percpu_io_thread(void *arg)
 	set_current_state(TASK_INTERRUPTIBLE);
 	while (!kthread_should_stop()) {
 		schedule();
-		set_current_state(TASK_RUNNING);
 		spin_lock_bh(&p->fp_work_lock);
 		while (!list_empty(&p->work_list)) {
 			list_splice_init(&p->work_list, &work_list);
@@ -583,10 +581,10 @@ int bnx2fc_percpu_io_thread(void *arg)
 
 			spin_lock_bh(&p->fp_work_lock);
 		}
+		__set_current_state(TASK_INTERRUPTIBLE);
 		spin_unlock_bh(&p->fp_work_lock);
-		set_current_state(TASK_INTERRUPTIBLE);
 	}
-	set_current_state(TASK_RUNNING);
+	__set_current_state(TASK_RUNNING);
 
 	return 0;
 }
@@ -661,31 +659,6 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
 	return 0;
 }
 
-static int  bnx2fc_mfs_update(struct fc_lport *lport)
-{
-	struct fcoe_port *port = lport_priv(lport);
-	struct bnx2fc_hba *hba = port->priv;
-	struct net_device *netdev = hba->netdev;
-	u32 mfs;
-	u32 max_mfs;
-
-	mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
-			     sizeof(struct fcoe_crc_eof));
-	max_mfs = BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header);
-	BNX2FC_HBA_DBG(lport, "mfs = %d, max_mfs = %d\n", mfs, max_mfs);
-	if (mfs > max_mfs)
-		mfs = max_mfs;
-
-	/* Adjust mfs to be a multiple of 256 bytes */
-	mfs = (((mfs - sizeof(struct fc_frame_header)) / BNX2FC_MIN_PAYLOAD) *
-			BNX2FC_MIN_PAYLOAD);
-	mfs = mfs + sizeof(struct fc_frame_header);
-
-	BNX2FC_HBA_DBG(lport, "Set MFS = %d\n", mfs);
-	if (fc_set_mfs(lport, mfs))
-		return -EINVAL;
-	return 0;
-}
 static void bnx2fc_link_speed_update(struct fc_lport *lport)
 {
 	struct fcoe_port *port = lport_priv(lport);
@@ -754,7 +727,7 @@ static int bnx2fc_net_config(struct fc_lport *lport)
 	    !hba->phys_dev->ethtool_ops->get_pauseparam)
 		return -EOPNOTSUPP;
 
-	if (bnx2fc_mfs_update(lport))
+	if (fc_set_mfs(lport, BNX2FC_MFS))
 		return -EINVAL;
 
 	skb_queue_head_init(&port->fcoe_pending_queue);
@@ -825,14 +798,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event)
 		if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
 			printk(KERN_ERR "indicate_netevent: "\
 					"adapter is not UP!!\n");
-		/* fall thru to update mfs if MTU has changed */
-	case NETDEV_CHANGEMTU:
-		BNX2FC_HBA_DBG(lport, "NETDEV_CHANGEMTU event\n");
-		bnx2fc_mfs_update(lport);
-		mutex_lock(&lport->lp_mutex);
-		list_for_each_entry(vport, &lport->vports, list)
-			bnx2fc_mfs_update(vport);
-		mutex_unlock(&lport->lp_mutex);
 		break;
 
 	case NETDEV_DOWN:
@@ -1095,13 +1060,6 @@ static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba)
 	struct netdev_hw_addr *ha;
 	int sel_san_mac = 0;
 
-	/* Do not support for bonding device */
-	if ((netdev->priv_flags & IFF_MASTER_ALB) ||
-			(netdev->priv_flags & IFF_SLAVE_INACTIVE) ||
-			(netdev->priv_flags & IFF_MASTER_8023AD)) {
-		return -EOPNOTSUPP;
-	}
-
 	/* setup Source MAC Address */
 	rcu_read_lock();
 	for_each_dev_addr(physdev, ha) {
@@ -1432,16 +1390,9 @@ static int bnx2fc_destroy(struct net_device *netdev)
 	struct net_device *phys_dev;
 	int rc = 0;
 
-	if (!rtnl_trylock())
-		return restart_syscall();
+	rtnl_lock();
 
 	mutex_lock(&bnx2fc_dev_lock);
-#ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE
-	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
-		rc = -ENODEV;
-		goto netdev_err;
-	}
-#endif
 	/* obtain physical netdev */
 	if (netdev->priv_flags & IFF_802_1Q_VLAN)
 		phys_dev = vlan_dev_real_dev(netdev);
@@ -1805,18 +1756,10 @@ static int bnx2fc_disable(struct net_device *netdev)
 	struct ethtool_drvinfo drvinfo;
 	int rc = 0;
 
-	if (!rtnl_trylock()) {
-		printk(KERN_ERR PFX "retrying for rtnl_lock\n");
-		return -EIO;
-	}
+	rtnl_lock();
 
 	mutex_lock(&bnx2fc_dev_lock);
 
-	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
-		rc = -ENODEV;
-		goto nodev;
-	}
-
 	/* obtain physical netdev */
 	if (netdev->priv_flags & IFF_802_1Q_VLAN)
 		phys_dev = vlan_dev_real_dev(netdev);
@@ -1867,19 +1810,11 @@ static int bnx2fc_enable(struct net_device *netdev)
 	struct ethtool_drvinfo drvinfo;
 	int rc = 0;
 
-	if (!rtnl_trylock()) {
-		printk(KERN_ERR PFX "retrying for rtnl_lock\n");
-		return -EIO;
-	}
+	rtnl_lock();
 
 	BNX2FC_MISC_DBG("Entered %s\n", __func__);
 	mutex_lock(&bnx2fc_dev_lock);
 
-	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
-		rc = -ENODEV;
-		goto nodev;
-	}
-
 	/* obtain physical netdev */
 	if (netdev->priv_flags & IFF_802_1Q_VLAN)
 		phys_dev = vlan_dev_real_dev(netdev);
@@ -1942,18 +1877,9 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
 		return -EIO;
 	}
 
-	if (!rtnl_trylock()) {
-		printk(KERN_ERR "trying for rtnl_lock\n");
-		return -EIO;
-	}
-	mutex_lock(&bnx2fc_dev_lock);
+	rtnl_lock();
 
-#ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE
-	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
-		rc = -ENODEV;
-		goto mod_err;
-	}
-#endif
+	mutex_lock(&bnx2fc_dev_lock);
 
 	if (!try_module_get(THIS_MODULE)) {
 		rc = -EINVAL;
@@ -2506,7 +2432,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
 	.change_queue_type	= fc_change_queue_type,
 	.this_id		= -1,
 	.cmd_per_lun		= 3,
-	.can_queue		= (BNX2FC_MAX_OUTSTANDING_CMNDS/2),
+	.can_queue		= BNX2FC_CAN_QUEUE,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.sg_tablesize		= BNX2FC_MAX_BDS_PER_CMD,
 	.max_sectors		= 512,
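
Note on the bnx2fc_fcoe.c changes above: the rtnl_trylock()/restart_syscall() retry dance and the THIS_MODULE state checks are gone in favor of a plain blocking rtnl_lock(). A minimal sketch of the resulting lock ordering (illustrative only; example_destroy() and do_teardown() are hypothetical stand-ins, bnx2fc_dev_lock is the real driver mutex):

static int example_destroy(struct net_device *netdev)
{
	int rc;

	rtnl_lock();			/* blocks instead of returning -EIO */
	mutex_lock(&bnx2fc_dev_lock);

	rc = do_teardown(netdev);	/* hypothetical teardown step */

	mutex_unlock(&bnx2fc_dev_lock);
	rtnl_unlock();
	return rc;
}
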
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 4f4096836742..1b680e288c56 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -87,7 +87,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
 	fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
 	fcoe_init1.task_list_pbl_addr_hi =
 				(u32) ((u64) hba->task_ctx_bd_dma >> 32);
-	fcoe_init1.mtu = hba->netdev->mtu;
+	fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;
 
 	fcoe_init1.flags = (PAGE_SHIFT <<
 				FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);
@@ -590,7 +590,10 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
 
 		num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;
 
+		spin_lock_bh(&tgt->tgt_lock);
 		rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
+		spin_unlock_bh(&tgt->tgt_lock);
+
 		if (rq_data) {
 			buf = rq_data;
 		} else {
@@ -603,8 +606,10 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
 			}
 
 			for (i = 0; i < num_rq; i++) {
+				spin_lock_bh(&tgt->tgt_lock);
 				rq_data = (unsigned char *)
 					   bnx2fc_get_next_rqe(tgt, 1);
+				spin_unlock_bh(&tgt->tgt_lock);
 				len = BNX2FC_RQ_BUF_SZ;
 				memcpy(buf1, rq_data, len);
 				buf1 += len;
@@ -615,13 +620,15 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
 
 		if (buf != rq_data)
 			kfree(buf);
+		spin_lock_bh(&tgt->tgt_lock);
 		bnx2fc_return_rqe(tgt, num_rq);
+		spin_unlock_bh(&tgt->tgt_lock);
 		break;
 
 	case FCOE_ERROR_DETECTION_CQE_TYPE:
 		/*
-		 *In case of error reporting CQE a single RQ entry
-		 * is consumes.
+		 * In case of error reporting CQE a single RQ entry
+		 * is consumed.
 		 */
 		spin_lock_bh(&tgt->tgt_lock);
 		num_rq = 1;
@@ -705,6 +712,7 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
 		 * In case of warning reporting CQE a single RQ entry
 		 * is consumed.
 		 */
+		spin_lock_bh(&tgt->tgt_lock);
 		num_rq = 1;
 		err_entry = (struct fcoe_err_report_entry *)
 			     bnx2fc_get_next_rqe(tgt, 1);
@@ -717,6 +725,7 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
 			err_entry->tx_buf_off, err_entry->rx_buf_off);
 
 		bnx2fc_return_rqe(tgt, 1);
+		spin_unlock_bh(&tgt->tgt_lock);
 		break;
 
 	default:
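
The unsolicited-completion hunks above put tgt->tgt_lock around every bnx2fc_get_next_rqe()/bnx2fc_return_rqe() pair, since the RQ consumer state is shared with other completion paths. The rule they enforce, reduced to a sketch (consume_rqe() is a hypothetical stand-in for the frame parsing):

static void example_unsol(struct bnx2fc_rport *tgt, int num_rq)
{
	void *rq_data;

	spin_lock_bh(&tgt->tgt_lock);
	rq_data = bnx2fc_get_next_rqe(tgt, num_rq);
	spin_unlock_bh(&tgt->tgt_lock);

	if (rq_data)
		consume_rqe(rq_data, num_rq);	/* hypothetical parser */

	spin_lock_bh(&tgt->tgt_lock);
	bnx2fc_return_rqe(tgt, num_rq);
	spin_unlock_bh(&tgt->tgt_lock);
}
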
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 0f1dd23730db..d3fc302c241a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -11,6 +11,9 @@
  */
 
 #include "bnx2fc.h"
+
+#define RESERVE_FREE_LIST_INDEX num_possible_cpus()
+
 static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
 			   int bd_index);
 static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
@@ -242,8 +245,9 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
 	u32 mem_size;
 	u16 xid;
 	int i;
-	int num_ios;
+	int num_ios, num_pri_ios;
 	size_t bd_tbl_sz;
+	int arr_sz = num_possible_cpus() + 1;
 
 	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
 		printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \
@@ -263,14 +267,14 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
 	}
 
 	cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) *
-				  num_possible_cpus(), GFP_KERNEL);
+				  arr_sz, GFP_KERNEL);
 	if (!cmgr->free_list) {
 		printk(KERN_ERR PFX "failed to alloc free_list\n");
 		goto mem_err;
 	}
 
 	cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) *
-				       num_possible_cpus(), GFP_KERNEL);
+				       arr_sz, GFP_KERNEL);
 	if (!cmgr->free_list_lock) {
 		printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
 		goto mem_err;
@@ -279,13 +283,18 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
 	cmgr->hba = hba;
 	cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
 
-	for (i = 0; i < num_possible_cpus(); i++)  {
+	for (i = 0; i < arr_sz; i++)  {
 		INIT_LIST_HEAD(&cmgr->free_list[i]);
 		spin_lock_init(&cmgr->free_list_lock[i]);
 	}
 
-	/* Pre-allocated pool of bnx2fc_cmds */
+	/*
+	 * Pre-allocated pool of bnx2fc_cmds.
+	 * Last entry in the free list array is the free list
+	 * of slow path requests.
+	 */
 	xid = BNX2FC_MIN_XID;
+	num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS;
 	for (i = 0; i < num_ios; i++) {
 		io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);
 
@@ -298,11 +307,13 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
 		INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);
 
 		io_req->xid = xid++;
-		if (io_req->xid >= BNX2FC_MAX_OUTSTANDING_CMNDS)
-			printk(KERN_ERR PFX "ERROR allocating xids - 0x%x\n",
-				io_req->xid);
-		list_add_tail(&io_req->link,
-			&cmgr->free_list[io_req->xid % num_possible_cpus()]);
+		if (i < num_pri_ios)
+			list_add_tail(&io_req->link,
+				&cmgr->free_list[io_req->xid %
+						 num_possible_cpus()]);
+		else
+			list_add_tail(&io_req->link,
+				&cmgr->free_list[num_possible_cpus()]);
 		io_req++;
 	}
 
@@ -389,7 +400,7 @@ free_cmd_pool:
 	if (!cmgr->free_list)
 		goto free_cmgr;
 
-	for (i = 0; i < num_possible_cpus(); i++)  {
+	for (i = 0; i < num_possible_cpus() + 1; i++)  {
 		struct list_head *list;
 		struct list_head *tmp;
 
@@ -413,6 +424,7 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
 	struct bnx2fc_cmd *io_req;
 	struct list_head *listp;
 	struct io_bdt *bd_tbl;
+	int index = RESERVE_FREE_LIST_INDEX;
 	u32 max_sqes;
 	u16 xid;
 
@@ -432,26 +444,26 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
 	 * NOTE: Free list insertions and deletions are protected with
 	 * cmgr lock
 	 */
-	spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
-	if ((list_empty(&(cmd_mgr->free_list[smp_processor_id()]))) ||
+	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
+	if ((list_empty(&(cmd_mgr->free_list[index]))) ||
 	    (tgt->num_active_ios.counter  >= max_sqes)) {
 		BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
 			"ios(%d):sqes(%d)\n",
 			tgt->num_active_ios.counter, tgt->max_sqes);
-		if (list_empty(&(cmd_mgr->free_list[smp_processor_id()])))
+		if (list_empty(&(cmd_mgr->free_list[index])))
 			printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
-		spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
 		return NULL;
 	}
 
 	listp = (struct list_head *)
-			cmd_mgr->free_list[smp_processor_id()].next;
+			cmd_mgr->free_list[index].next;
 	list_del_init(listp);
 	io_req = (struct bnx2fc_cmd *) listp;
 	xid = io_req->xid;
 	cmd_mgr->cmds[xid] = io_req;
 	atomic_inc(&tgt->num_active_ios);
-	spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
 
 	INIT_LIST_HEAD(&io_req->link);
 
@@ -479,27 +491,30 @@ static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
 	struct io_bdt *bd_tbl;
 	u32 max_sqes;
 	u16 xid;
+	int index = get_cpu();
 
 	max_sqes = BNX2FC_SCSI_MAX_SQES;
 	/*
 	 * NOTE: Free list insertions and deletions are protected with
 	 * cmgr lock
 	 */
-	spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
-	if ((list_empty(&cmd_mgr->free_list[smp_processor_id()])) ||
+	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
+	if ((list_empty(&cmd_mgr->free_list[index])) ||
 	    (tgt->num_active_ios.counter  >= max_sqes)) {
-		spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
+		put_cpu();
 		return NULL;
 	}
 
 	listp = (struct list_head *)
-		cmd_mgr->free_list[smp_processor_id()].next;
+		cmd_mgr->free_list[index].next;
 	list_del_init(listp);
 	io_req = (struct bnx2fc_cmd *) listp;
 	xid = io_req->xid;
 	cmd_mgr->cmds[xid] = io_req;
 	atomic_inc(&tgt->num_active_ios);
-	spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
+	put_cpu();
 
 	INIT_LIST_HEAD(&io_req->link);
 
@@ -522,8 +537,14 @@ void bnx2fc_cmd_release(struct kref *ref)
 	struct bnx2fc_cmd *io_req = container_of(ref,
 						struct bnx2fc_cmd, refcount);
 	struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
+	int index;
 
-	spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+	if (io_req->cmd_type == BNX2FC_SCSI_CMD)
+		index = io_req->xid % num_possible_cpus();
+	else
+		index = RESERVE_FREE_LIST_INDEX;
+
+	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
 	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
 		bnx2fc_free_mp_resc(io_req);
 	cmd_mgr->cmds[io_req->xid] = NULL;
@@ -531,9 +552,9 @@ void bnx2fc_cmd_release(struct kref *ref)
 	list_del_init(&io_req->link);
 	/* Add it to the free list */
 	list_add(&io_req->link,
-			&cmd_mgr->free_list[smp_processor_id()]);
+			&cmd_mgr->free_list[index]);
 	atomic_dec(&io_req->tgt->num_active_ios);
-	spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
 }
 
 static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
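
The command-manager hunks above grow the free-list array by one entry: slots 0..num_possible_cpus()-1 hold SCSI commands hashed by xid, and the extra trailing slot is reserved for slow-path ELS/TM requests so they cannot be starved by SCSI I/O. The index selection, as a sketch:

/* Sketch: pick the free list an io_req lives on. */
static int example_free_list_index(struct bnx2fc_cmd *io_req)
{
	if (io_req->cmd_type == BNX2FC_SCSI_CMD)
		return io_req->xid % num_possible_cpus();
	return num_possible_cpus();	/* RESERVE_FREE_LIST_INDEX */
}
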
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 7ea93af60260..7cc05e4e82d5 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -304,10 +304,8 @@ static void bnx2fc_upload_session(struct fcoe_port *port,
 				" not sent to FW\n");
 
 	/* Free session resources */
-	spin_lock_bh(&tgt->cq_lock);
 	bnx2fc_free_session_resc(hba, tgt);
 	bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
-	spin_unlock_bh(&tgt->cq_lock);
 }
 
 static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
@@ -830,11 +828,13 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
 		tgt->rq = NULL;
 	}
 	/* Free CQ */
+	spin_lock_bh(&tgt->cq_lock);
 	if (tgt->cq) {
 		dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
 				    tgt->cq, tgt->cq_dma);
 		tgt->cq = NULL;
 	}
+	spin_unlock_bh(&tgt->cq_lock);
 	/* Free SQ */
 	if (tgt->sq) {
 		dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 8eeb39ffa37f..e98ae33f1295 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -132,14 +132,25 @@ static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
 	if (page_count(sg_page(sg)) >= 1 && !recv)
 		return;
 
-	segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
+	if (recv) {
+		segment->atomic_mapped = true;
+		segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
+	} else {
+		segment->atomic_mapped = false;
+		/* the xmit path can sleep with the page mapped so use kmap */
+		segment->sg_mapped = kmap(sg_page(sg));
+	}
+
 	segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
 }
 
 void iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
 {
 	if (segment->sg_mapped) {
-		kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
+		if (segment->atomic_mapped)
+			kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
+		else
+			kunmap(sg_page(segment->sg));
 		segment->sg_mapped = NULL;
 		segment->data = NULL;
 	}
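
The libiscsi_tcp hunk records how each segment page was mapped so that unmap can pair the calls correctly: the receive path keeps kmap_atomic() (softirq context), while the transmit path switches to kmap() because it may sleep with the page mapped. The pairing, reduced to a sketch with hypothetical helper names:

static void *example_map(struct page *page, bool recv, bool *atomic)
{
	*atomic = recv;
	return recv ? kmap_atomic(page, KM_SOFTIRQ0) : kmap(page);
}

static void example_unmap(struct page *page, void *addr, bool atomic)
{
	if (atomic)
		kunmap_atomic(addr, KM_SOFTIRQ0);
	else
		kunmap(page);
}
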
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index 14de249917f8..88928f00aa2d 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -1,7 +1,7 @@
 #/*******************************************************************
 # * This file is part of the Emulex Linux Device Driver for         *
 # * Fibre Channel Host Bus Adapters.                                *
-# * Copyright (C) 2004-2006 Emulex.  All rights reserved.           *
+# * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
 # * EMULEX and SLI are trademarks of Emulex.                        *
 # * www.emulex.com                                                  *
 # *                                                                 *
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index b64c6da870d3..60e98a62f308 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -539,6 +539,8 @@ struct lpfc_hba {
 		(struct lpfc_hba *, uint32_t);
 	int (*lpfc_hba_down_link)
 		(struct lpfc_hba *, uint32_t);
+	int (*lpfc_selective_reset)
+		(struct lpfc_hba *);
 
 	/* SLI4 specific HBA data structure */
 	struct lpfc_sli4_hba sli4_hba;
@@ -895,7 +897,18 @@ lpfc_worker_wake_up(struct lpfc_hba *phba)
 	return;
 }
 
-static inline void
+static inline int
+lpfc_readl(void __iomem *addr, uint32_t *data)
+{
+	uint32_t temp;
+	temp = readl(addr);
+	if (temp == 0xffffffff)
+		return -EIO;
+	*data = temp;
+	return 0;
+}
+
+static inline int
 lpfc_sli_read_hs(struct lpfc_hba *phba)
 {
 	/*
@@ -904,15 +917,17 @@ lpfc_sli_read_hs(struct lpfc_hba *phba)
 	 */
 	phba->sli.slistat.err_attn_event++;
 
-	/* Save status info */
-	phba->work_hs = readl(phba->HSregaddr);
-	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
-	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
+	/* Save status info and check for unplug error */
+	if (lpfc_readl(phba->HSregaddr, &phba->work_hs) ||
+		lpfc_readl(phba->MBslimaddr + 0xa8, &phba->work_status[0]) ||
+		lpfc_readl(phba->MBslimaddr + 0xac, &phba->work_status[1])) {
+		return -EIO;
+	}
 
 	/* Clear chip Host Attention error bit */
 	writel(HA_ERATT, phba->HAregaddr);
 	readl(phba->HAregaddr); /* flush */
 	phba->pport->stopped = 1;
 
-	return;
+	return 0;
 }
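
lpfc_readl() relies on the fact that a PCI read from a surprise-removed adapter completes as all ones, so a 0xffffffff readback is reported as -EIO rather than handed back as register state. The caller-side shape used throughout the rest of this patch:

	uint32_t creg_val;

	/* a failed readback means the HBA is likely hot-unplugged */
	if (lpfc_readl(phba->HCregaddr, &creg_val))
		return -EIO;
	/* creg_val is only consumed when the read succeeded */
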
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index e7c020df12fa..4e0faa00b96f 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -685,7 +685,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
  * -EIO reset not configured or error posting the event
  * zero for success
  **/
-static int
+int
 lpfc_selective_reset(struct lpfc_hba *phba)
 {
 	struct completion online_compl;
@@ -746,7 +746,7 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
 	int status = -EINVAL;
 
 	if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
-		status = lpfc_selective_reset(phba);
+		status = phba->lpfc_selective_reset(phba);
 
 	if (status == 0)
 		return strlen(buf);
@@ -1224,7 +1224,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
 	if (val & ENABLE_FCP_RING_POLLING) {
 		if ((val & DISABLE_FCP_RING_INT) &&
 		    !(old_val & DISABLE_FCP_RING_INT)) {
-			creg_val = readl(phba->HCregaddr);
+			if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+				spin_unlock_irq(&phba->hbalock);
+				return -EINVAL;
+			}
 			creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
 			writel(creg_val, phba->HCregaddr);
 			readl(phba->HCregaddr); /* flush */
@@ -1242,7 +1245,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
 		spin_unlock_irq(&phba->hbalock);
 		del_timer(&phba->fcp_poll_timer);
 		spin_lock_irq(&phba->hbalock);
-		creg_val = readl(phba->HCregaddr);
+		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+			spin_unlock_irq(&phba->hbalock);
+			return -EINVAL;
+		}
 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
 		writel(creg_val, phba->HCregaddr);
 		readl(phba->HCregaddr); /* flush */
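
Selective reset is now dispatched through a per-HBA method pointer instead of a direct call, letting the init code install a variant-specific handler. The two sides of the wiring, as they appear in this patch:

	/* in lpfc_init_api_table_setup(): */
	phba->lpfc_selective_reset = lpfc_selective_reset;

	/* in lpfc_issue_reset(), the sysfs store path: */
	status = phba->lpfc_selective_reset(phba);
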
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 0dd43bb91618..793b9f1131fb 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2009-2010 Emulex.  All rights reserved.                *
+ * Copyright (C) 2009-2011 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -348,7 +348,10 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
 	dd_data->context_un.iocb.bmp = bmp;
 
 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
-		creg_val = readl(phba->HCregaddr);
+		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+			rc = -EIO;
+			goto free_cmdiocbq;
+		}
 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
 		writel(creg_val, phba->HCregaddr);
 		readl(phba->HCregaddr); /* flush */
@@ -599,7 +602,10 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 	dd_data->context_un.iocb.ndlp = ndlp;
 
 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
-		creg_val = readl(phba->HCregaddr);
+		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+			rc = -EIO;
+			goto linkdown_err;
+		}
 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
 		writel(creg_val, phba->HCregaddr);
 		readl(phba->HCregaddr); /* flush */
@@ -613,6 +619,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 	else
 		rc = -EIO;
 
+linkdown_err:
 	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
 	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
@@ -1357,7 +1364,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
 	dd_data->context_un.iocb.ndlp = ndlp;
 
 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
-		creg_val = readl(phba->HCregaddr);
+		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+			rc = -IOCB_ERROR;
+			goto issue_ct_rsp_exit;
+		}
 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
 		writel(creg_val, phba->HCregaddr);
 		readl(phba->HCregaddr); /* flush */
@@ -2479,16 +2489,18 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 
 	from = (uint8_t *)dd_data->context_un.mbox.mb;
 	job = dd_data->context_un.mbox.set_job;
-	size = job->reply_payload.payload_len;
-	job->reply->reply_payload_rcv_len =
-		sg_copy_from_buffer(job->reply_payload.sg_list,
-				job->reply_payload.sg_cnt,
-				from, size);
-	job->reply->result = 0;
+	if (job) {
+		size = job->reply_payload.payload_len;
+		job->reply->reply_payload_rcv_len =
+			sg_copy_from_buffer(job->reply_payload.sg_list,
+					job->reply_payload.sg_cnt,
+					from, size);
+		job->reply->result = 0;
 
+		job->dd_data = NULL;
+		job->job_done(job);
+	}
 	dd_data->context_un.mbox.set_job = NULL;
-	job->dd_data = NULL;
-	job->job_done(job);
 	/* need to hold the lock until we call job done to hold off
 	 * the timeout handler returning to the midlayer while
 	 * we are still processing the job
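
With the guard above, lpfc_bsg_wake_mbox_wait() touches the bsg job only while one is still attached; if the timeout handler already reclaimed it, the completion path merely clears set_job. The guard in isolation:

	job = dd_data->context_un.mbox.set_job;
	if (job) {			/* still owned by this completion? */
		job->reply->result = 0;
		job->dd_data = NULL;
		job->job_done(job);
	}
	dd_data->context_un.mbox.set_job = NULL;
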
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 3d40023f4804..f0b332f4eedb 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -254,8 +254,8 @@ uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
 void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t,
 			   uint32_t);
 void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *);
-
-void lpfc_reset_barrier(struct lpfc_hba * phba);
+int lpfc_selective_reset(struct lpfc_hba *);
+void lpfc_reset_barrier(struct lpfc_hba *);
 int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
 int lpfc_sli_brdkill(struct lpfc_hba *);
 int lpfc_sli_brdreset(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 8e28edf9801e..735028fedda5 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -89,7 +89,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
 		return 0;
 
 	/* Read the HBA Host Attention Register */
-	ha_copy = readl(phba->HAregaddr);
+	if (lpfc_readl(phba->HAregaddr, &ha_copy))
+		return 1;
 
 	if (!(ha_copy & HA_LATT))
 		return 0;
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 94ae37c5111a..95f11ed79463 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1344,7 +1344,7 @@ typedef struct {		/* FireFly BIU registers */
 #define HS_FFER1       0x80000000	/* Bit 31 */
 #define HS_CRIT_TEMP   0x00000100	/* Bit 8  */
 #define HS_FFERM       0xFF000100	/* Mask for error bits 31:24 and 8 */
-
+#define UNPLUG_ERR     0x00000001	/* Indicate pci hot unplug */
 /* Host Control Register */
 
 #define HC_REG_OFFSET  12	/* Byte offset from register base address */
@@ -1713,6 +1713,17 @@ struct lpfc_pde6 {
 #define pde6_apptagval_WORD	word2
 };
 
+struct lpfc_pde7 {
+	uint32_t word0;
+#define pde7_type_SHIFT		24
+#define pde7_type_MASK		0x000000ff
+#define pde7_type_WORD		word0
+#define pde7_rsvd0_SHIFT	0
+#define pde7_rsvd0_MASK		0x00ffffff
+#define pde7_rsvd0_WORD		word0
+	uint32_t addrHigh;
+	uint32_t addrLow;
+};
 
 /* Structure for MB Command LOAD_SM and DOWN_LOAD */
 
@@ -3621,7 +3632,7 @@ typedef struct _IOCB {	/* IOCB structure */
 		ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */
 		QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */
 		struct rcv_seq64 rcvseq64;	/* RCV_SEQ64 and RCV_CONT64 */
-		struct sli4_bls_acc bls_acc; /* UNSOL ABTS BLS_ACC params */
+		struct sli4_bls_rsp bls_rsp; /* UNSOL ABTS BLS_RSP params */
 		uint32_t ulpWord[IOCB_WORD_SZ - 2];	/* generic 6 'words' */
 	} un;
 	union {
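
The *_SHIFT/_MASK/_WORD triplets in these headers feed lpfc's bf_set()/bf_get() accessors, which boil down to shift-and-mask arithmetic on the named word. A simplified model (not the driver's actual macros, which also resolve the _WORD member):

#define EX_GET(word, shift, mask) \
	(((word) >> (shift)) & (mask))
#define EX_SET(word, shift, mask, val) \
	(((word) & ~((mask) << (shift))) | (((val) & (mask)) << (shift)))
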
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index c7178d60c7bf..8433ac0d9fb4 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -215,7 +215,7 @@ struct lpfc_sli4_flags {
 #define lpfc_fip_flag_WORD word0
 };
 
-struct sli4_bls_acc {
+struct sli4_bls_rsp {
 	uint32_t word0_rsvd;      /* Word0 must be reserved */
 	uint32_t word1;
 #define lpfc_abts_orig_SHIFT      0
@@ -231,6 +231,16 @@ struct sli4_bls_acc {
 #define lpfc_abts_oxid_MASK       0x0000FFFF
 #define lpfc_abts_oxid_WORD       word2
 	uint32_t word3;
+#define lpfc_vndr_code_SHIFT	0
+#define lpfc_vndr_code_MASK	0x000000FF
+#define lpfc_vndr_code_WORD	word3
+#define lpfc_rsn_expln_SHIFT	8
+#define lpfc_rsn_expln_MASK	0x000000FF
+#define lpfc_rsn_expln_WORD	word3
+#define lpfc_rsn_code_SHIFT	16
+#define lpfc_rsn_code_MASK	0x000000FF
+#define lpfc_rsn_code_WORD	word3
+
 	uint32_t word4;
 	uint32_t word5_rsvd;	/* Word5 must be reserved */
 };
@@ -711,21 +721,27 @@ struct lpfc_sli4_cfg_mhdr {
 union lpfc_sli4_cfg_shdr {
 	struct {
 		uint32_t word6;
-#define lpfc_mbox_hdr_opcode_SHIFT		0
-#define lpfc_mbox_hdr_opcode_MASK		0x000000FF
-#define lpfc_mbox_hdr_opcode_WORD		word6
-#define lpfc_mbox_hdr_subsystem_SHIFT		8
-#define lpfc_mbox_hdr_subsystem_MASK		0x000000FF
-#define lpfc_mbox_hdr_subsystem_WORD		word6
-#define lpfc_mbox_hdr_port_number_SHIFT		16
-#define lpfc_mbox_hdr_port_number_MASK		0x000000FF
-#define lpfc_mbox_hdr_port_number_WORD		word6
-#define lpfc_mbox_hdr_domain_SHIFT		24
-#define lpfc_mbox_hdr_domain_MASK		0x000000FF
-#define lpfc_mbox_hdr_domain_WORD		word6
+#define lpfc_mbox_hdr_opcode_SHIFT	0
+#define lpfc_mbox_hdr_opcode_MASK	0x000000FF
+#define lpfc_mbox_hdr_opcode_WORD	word6
+#define lpfc_mbox_hdr_subsystem_SHIFT	8
+#define lpfc_mbox_hdr_subsystem_MASK	0x000000FF
+#define lpfc_mbox_hdr_subsystem_WORD	word6
+#define lpfc_mbox_hdr_port_number_SHIFT	16
+#define lpfc_mbox_hdr_port_number_MASK	0x000000FF
+#define lpfc_mbox_hdr_port_number_WORD	word6
+#define lpfc_mbox_hdr_domain_SHIFT	24
+#define lpfc_mbox_hdr_domain_MASK	0x000000FF
+#define lpfc_mbox_hdr_domain_WORD	word6
 		uint32_t timeout;
 		uint32_t request_length;
-		uint32_t reserved9;
+		uint32_t word9;
+#define lpfc_mbox_hdr_version_SHIFT	0
+#define lpfc_mbox_hdr_version_MASK	0x000000FF
+#define lpfc_mbox_hdr_version_WORD	word9
+#define LPFC_Q_CREATE_VERSION_2	2
+#define LPFC_Q_CREATE_VERSION_1	1
+#define LPFC_Q_CREATE_VERSION_0	0
 	} request;
 	struct {
 		uint32_t word6;
@@ -917,9 +933,12 @@ struct cq_context {
 #define LPFC_CQ_CNT_512		0x1
 #define LPFC_CQ_CNT_1024	0x2
 	uint32_t word1;
-#define lpfc_cq_eq_id_SHIFT		22
+#define lpfc_cq_eq_id_SHIFT		22	/* Version 0 Only */
 #define lpfc_cq_eq_id_MASK		0x000000FF
 #define lpfc_cq_eq_id_WORD		word1
+#define lpfc_cq_eq_id_2_SHIFT		0	/* Version 2 Only */
+#define lpfc_cq_eq_id_2_MASK		0x0000FFFF
+#define lpfc_cq_eq_id_2_WORD		word1
 	uint32_t reserved0;
 	uint32_t reserved1;
 };
@@ -929,6 +948,9 @@ struct lpfc_mbx_cq_create {
 	union {
 		struct {
 			uint32_t word0;
+#define lpfc_mbx_cq_create_page_size_SHIFT	16	/* Version 2 Only */
+#define lpfc_mbx_cq_create_page_size_MASK	0x000000FF
+#define lpfc_mbx_cq_create_page_size_WORD	word0
 #define lpfc_mbx_cq_create_num_pages_SHIFT	0
 #define lpfc_mbx_cq_create_num_pages_MASK	0x0000FFFF
 #define lpfc_mbx_cq_create_num_pages_WORD	word0
@@ -969,7 +991,7 @@ struct wq_context {
 struct lpfc_mbx_wq_create {
 	struct mbox_header header;
 	union {
-		struct {
+		struct {	/* Version 0 Request */
 			uint32_t word0;
 #define lpfc_mbx_wq_create_num_pages_SHIFT	0
 #define lpfc_mbx_wq_create_num_pages_MASK	0x0000FFFF
@@ -979,6 +1001,23 @@ struct lpfc_mbx_wq_create {
 #define lpfc_mbx_wq_create_cq_id_WORD		word0
 			struct dma_address page[LPFC_MAX_WQ_PAGE];
 		} request;
+		struct {	/* Version 1 Request */
+			uint32_t word0;	/* Word 0 is the same as in v0 */
+			uint32_t word1;
+#define lpfc_mbx_wq_create_page_size_SHIFT	0
+#define lpfc_mbx_wq_create_page_size_MASK	0x000000FF
+#define lpfc_mbx_wq_create_page_size_WORD	word1
+#define lpfc_mbx_wq_create_wqe_size_SHIFT	8
+#define lpfc_mbx_wq_create_wqe_size_MASK	0x0000000F
+#define lpfc_mbx_wq_create_wqe_size_WORD	word1
+#define LPFC_WQ_WQE_SIZE_64	0x5
+#define LPFC_WQ_WQE_SIZE_128	0x6
+#define lpfc_mbx_wq_create_wqe_count_SHIFT	16
+#define lpfc_mbx_wq_create_wqe_count_MASK	0x0000FFFF
+#define lpfc_mbx_wq_create_wqe_count_WORD	word1
+			uint32_t word2;
+			struct dma_address page[LPFC_MAX_WQ_PAGE-1];
+		} request_1;
 		struct {
 			uint32_t word0;
 #define lpfc_mbx_wq_create_q_id_SHIFT	0
@@ -1007,13 +1046,22 @@ struct lpfc_mbx_wq_destroy {
 #define LPFC_DATA_BUF_SIZE 2048
 struct rq_context {
 	uint32_t word0;
-#define lpfc_rq_context_rq_size_SHIFT	16
-#define lpfc_rq_context_rq_size_MASK	0x0000000F
-#define lpfc_rq_context_rq_size_WORD	word0
+#define lpfc_rq_context_rqe_count_SHIFT	16	/* Version 0 Only */
+#define lpfc_rq_context_rqe_count_MASK	0x0000000F
+#define lpfc_rq_context_rqe_count_WORD	word0
 #define LPFC_RQ_RING_SIZE_512		9	/* 512 entries */
 #define LPFC_RQ_RING_SIZE_1024		10	/* 1024 entries */
 #define LPFC_RQ_RING_SIZE_2048		11	/* 2048 entries */
 #define LPFC_RQ_RING_SIZE_4096		12	/* 4096 entries */
+#define lpfc_rq_context_rqe_count_1_SHIFT	16	/* Version 1 Only */
+#define lpfc_rq_context_rqe_count_1_MASK	0x0000FFFF
+#define lpfc_rq_context_rqe_count_1_WORD	word0
+#define lpfc_rq_context_rqe_size_SHIFT	8		/* Version 1 Only */
+#define lpfc_rq_context_rqe_size_MASK	0x0000000F
+#define lpfc_rq_context_rqe_size_WORD	word0
+#define lpfc_rq_context_page_size_SHIFT	0		/* Version 1 Only */
+#define lpfc_rq_context_page_size_MASK	0x000000FF
+#define lpfc_rq_context_page_size_WORD	word0
 	uint32_t reserved1;
 	uint32_t word2;
 #define lpfc_rq_context_cq_id_SHIFT	16
@@ -1022,7 +1070,7 @@ struct rq_context {
 #define lpfc_rq_context_buf_size_SHIFT	0
 #define lpfc_rq_context_buf_size_MASK	0x0000FFFF
 #define lpfc_rq_context_buf_size_WORD	word2
-	uint32_t reserved3;
+	uint32_t buffer_size;				/* Version 1 Only */
 };
 
 struct lpfc_mbx_rq_create {
@@ -1062,16 +1110,16 @@ struct lpfc_mbx_rq_destroy {
 
 struct mq_context {
 	uint32_t word0;
-#define lpfc_mq_context_cq_id_SHIFT	22
+#define lpfc_mq_context_cq_id_SHIFT	22	/* Version 0 Only */
 #define lpfc_mq_context_cq_id_MASK	0x000003FF
 #define lpfc_mq_context_cq_id_WORD	word0
-#define lpfc_mq_context_count_SHIFT	16
-#define lpfc_mq_context_count_MASK	0x0000000F
-#define lpfc_mq_context_count_WORD	word0
-#define LPFC_MQ_CNT_16		0x5
-#define LPFC_MQ_CNT_32		0x6
-#define LPFC_MQ_CNT_64		0x7
-#define LPFC_MQ_CNT_128		0x8
+#define lpfc_mq_context_ring_size_SHIFT	16
+#define lpfc_mq_context_ring_size_MASK	0x0000000F
+#define lpfc_mq_context_ring_size_WORD	word0
+#define LPFC_MQ_RING_SIZE_16		0x5
+#define LPFC_MQ_RING_SIZE_32		0x6
+#define LPFC_MQ_RING_SIZE_64		0x7
+#define LPFC_MQ_RING_SIZE_128		0x8
 	uint32_t word1;
 #define lpfc_mq_context_valid_SHIFT	31
 #define lpfc_mq_context_valid_MASK	0x00000001
@@ -1105,9 +1153,12 @@ struct lpfc_mbx_mq_create_ext {
 	union {
 		struct {
 			uint32_t word0;
-#define lpfc_mbx_mq_create_ext_num_pages_SHIFT		0
-#define lpfc_mbx_mq_create_ext_num_pages_MASK		0x0000FFFF
-#define lpfc_mbx_mq_create_ext_num_pages_WORD		word0
+#define lpfc_mbx_mq_create_ext_num_pages_SHIFT	0
+#define lpfc_mbx_mq_create_ext_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_mq_create_ext_num_pages_WORD	word0
+#define lpfc_mbx_mq_create_ext_cq_id_SHIFT	16	/* Version 1 Only */
+#define lpfc_mbx_mq_create_ext_cq_id_MASK	0x0000FFFF
+#define lpfc_mbx_mq_create_ext_cq_id_WORD	word0
 			uint32_t async_evt_bmap;
 #define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT	LPFC_TRAILER_CODE_LINK
 #define lpfc_mbx_mq_create_ext_async_evt_link_MASK	0x00000001
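
The lpfc_hw4.h changes add a version field in word9 of the config header plus version-specific request layouts; the queue-create routines later in this patch stamp the version and then pick the matching field. For CQ creation that looks like:

	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.cqv);
	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2)
		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
		       eq->queue_id);
	else
		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
		       eq->queue_id);
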
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 35665cfb5689..e6ebe516cfbb 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -507,7 +507,10 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 	phba->hba_flag &= ~HBA_ERATT_HANDLED;
 
 	/* Enable appropriate host interrupts */
-	status = readl(phba->HCregaddr);
+	if (lpfc_readl(phba->HCregaddr, &status)) {
+		spin_unlock_irq(&phba->hbalock);
+		return -EIO;
+	}
 	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
 	if (psli->num_rings > 0)
 		status |= HC_R0INT_ENA;
@@ -1222,7 +1225,10 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
 	/* Wait for the ER1 bit to clear.*/
 	while (phba->work_hs & HS_FFER1) {
 		msleep(100);
-		phba->work_hs = readl(phba->HSregaddr);
+		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
+			phba->work_hs = UNPLUG_ERR;
+			break;
+		}
 		/* If driver is unloading let the worker thread continue */
 		if (phba->pport->load_flag & FC_UNLOADING) {
 			phba->work_hs = 0;
@@ -4474,6 +4480,7 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
 {
 	phba->lpfc_hba_init_link = lpfc_hba_init_link;
 	phba->lpfc_hba_down_link = lpfc_hba_down_link;
+	phba->lpfc_selective_reset = lpfc_selective_reset;
 	switch (dev_grp) {
 	case LPFC_PCI_DEV_LP:
 		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
@@ -5385,13 +5392,16 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
 	int i, port_error = 0;
 	uint32_t if_type;
 
+	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
+	memset(&reg_data, 0, sizeof(reg_data));
 	if (!phba->sli4_hba.PSMPHRregaddr)
 		return -ENODEV;
 
 	/* Wait up to 30 seconds for the SLI Port POST done and ready */
 	for (i = 0; i < 3000; i++) {
-		portsmphr_reg.word0 = readl(phba->sli4_hba.PSMPHRregaddr);
-		if (bf_get(lpfc_port_smphr_perr, &portsmphr_reg)) {
+		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
+			&portsmphr_reg.word0) ||
+			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
 			/* Port has a fatal POST error, break out */
 			port_error = -ENODEV;
 			break;
@@ -5472,9 +5482,9 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
 			break;
 		case LPFC_SLI_INTF_IF_TYPE_2:
 			/* Final checks.  The port status should be clean. */
-			reg_data.word0 =
-				readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
-			if (bf_get(lpfc_sliport_status_err, &reg_data)) {
+			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+				&reg_data.word0) ||
+				bf_get(lpfc_sliport_status_err, &reg_data)) {
 				phba->work_status[0] =
 					readl(phba->sli4_hba.u.if_type2.
 					      ERR1regaddr);
@@ -6760,9 +6770,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
 			 * the loop again.
 			 */
 			for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
-				reg_data.word0 =
-					readl(phba->sli4_hba.u.if_type2.
-					      STATUSregaddr);
+				if (lpfc_readl(phba->sli4_hba.u.if_type2.
+					      STATUSregaddr, &reg_data.word0)) {
+					rc = -ENODEV;
+					break;
+				}
 				if (bf_get(lpfc_sliport_status_rdy, &reg_data))
 					break;
 				if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
@@ -6783,8 +6795,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
 			}
 
 			/* Detect any port errors. */
-			reg_data.word0 = readl(phba->sli4_hba.u.if_type2.
-					       STATUSregaddr);
+			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+				 &reg_data.word0)) {
+				rc = -ENODEV;
+				break;
+			}
 			if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
 			    (rdy_chk >= 1000)) {
 				phba->work_status[0] = readl(
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index bf34178b80bf..2b962b020cfb 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -1514,10 +1514,11 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 	struct scatterlist *sgpe = NULL; /* s/g prot entry */
 	struct lpfc_pde5 *pde5 = NULL;
 	struct lpfc_pde6 *pde6 = NULL;
-	struct ulp_bde64 *prot_bde = NULL;
+	struct lpfc_pde7 *pde7 = NULL;
 	dma_addr_t dataphysaddr, protphysaddr;
 	unsigned short curr_data = 0, curr_prot = 0;
-	unsigned int split_offset, protgroup_len;
+	unsigned int split_offset;
+	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
 	unsigned int protgrp_blks, protgrp_bytes;
 	unsigned int remainder, subtotal;
 	int status;
@@ -1585,23 +1586,33 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		bpl++;
 
 		/* setup the first BDE that points to protection buffer */
-		prot_bde = (struct ulp_bde64 *) bpl;
-		protphysaddr = sg_dma_address(sgpe);
-		prot_bde->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr));
-		prot_bde->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr));
-		protgroup_len = sg_dma_len(sgpe);
+		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
+		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
 
 		/* must be integer multiple of the DIF block length */
 		BUG_ON(protgroup_len % 8);
 
+		pde7 = (struct lpfc_pde7 *) bpl;
+		memset(pde7, 0, sizeof(struct lpfc_pde7));
+		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
+
+		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
+		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
+
 		protgrp_blks = protgroup_len / 8;
 		protgrp_bytes = protgrp_blks * blksize;
 
-		prot_bde->tus.f.bdeSize = protgroup_len;
-		prot_bde->tus.f.bdeFlags = LPFC_PDE7_DESCRIPTOR;
-		prot_bde->tus.w = le32_to_cpu(bpl->tus.w);
+		/* check if this pde is crossing the 4K boundary; if so split */
+		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
+			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
+			protgroup_offset += protgroup_remainder;
+			protgrp_blks = protgroup_remainder / 8;
+			protgrp_bytes = protgrp_blks * blksize;
+		} else {
+			protgroup_offset = 0;
+			curr_prot++;
+		}
 
-		curr_prot++;
 		num_bde++;
 
 		/* setup BDE's for data blocks associated with DIF data */
@@ -1653,6 +1664,13 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 
 		}
 
+		if (protgroup_offset) {
+			/* update the reference tag */
+			reftag += protgrp_blks;
+			bpl++;
+			continue;
+		}
+
 		/* are we done ? */
 		if (curr_prot == protcnt) {
 			alldone = 1;
@@ -1675,6 +1693,7 @@ out:
 
 	return num_bde;
 }
+
 /*
  * Given a SCSI command that supports DIF, determine composition of protection
  * groups involved in setting up buffer lists
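
The PDE7 logic above keeps each protection-group descriptor within one 4 KiB window: when a group would cross the boundary, only the bytes up to it are described now, and protgroup_offset carries the remainder into the next pass (8 bytes of DIF cover one logical block of blksize data bytes). The bookkeeping in isolation (addr_low stands in for pde7->addrLow):

	if ((addr_low & 0xfff) + protgroup_len > 0x1000) {
		/* describe only up to the 4K boundary this pass */
		protgroup_remainder = 0x1000 - (addr_low & 0xfff);
		protgroup_offset += protgroup_remainder;
		protgrp_blks = protgroup_remainder / 8;
		protgrp_bytes = protgrp_blks * blksize;
	} else {
		protgroup_offset = 0;	/* sg entry fully consumed */
		curr_prot++;
	}
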
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 2ee0374a9908..4746dcd756dd 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -3477,7 +3477,8 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
 	int retval = 0;
 
 	/* Read the HBA Host Status Register */
-	status = readl(phba->HSregaddr);
+	if (lpfc_readl(phba->HSregaddr, &status))
+		return 1;
 
 	/*
 	 * Check status register every 100ms for 5 retries, then every
@@ -3502,7 +3503,10 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
 			lpfc_sli_brdrestart(phba);
 		}
 		/* Read the HBA Host Status Register */
-		status = readl(phba->HSregaddr);
+		if (lpfc_readl(phba->HSregaddr, &status)) {
+			retval = 1;
+			break;
+		}
 	}
 
 	/* Check to see if any errors occurred during init */
@@ -3584,7 +3588,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
 	uint32_t __iomem *resp_buf;
 	uint32_t __iomem *mbox_buf;
 	volatile uint32_t mbox;
-	uint32_t hc_copy;
+	uint32_t hc_copy, ha_copy, resp_data;
 	int  i;
 	uint8_t hdrtype;
 
@@ -3601,12 +3605,15 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
 	resp_buf = phba->MBslimaddr;
 
 	/* Disable the error attention */
-	hc_copy = readl(phba->HCregaddr);
+	if (lpfc_readl(phba->HCregaddr, &hc_copy))
+		return;
 	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
 	readl(phba->HCregaddr); /* flush */
 	phba->link_flag |= LS_IGNORE_ERATT;
 
-	if (readl(phba->HAregaddr) & HA_ERATT) {
+	if (lpfc_readl(phba->HAregaddr, &ha_copy))
+		return;
+	if (ha_copy & HA_ERATT) {
 		/* Clear Chip error bit */
 		writel(HA_ERATT, phba->HAregaddr);
 		phba->pport->stopped = 1;
@@ -3620,11 +3627,18 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
 	mbox_buf = phba->MBslimaddr;
 	writel(mbox, mbox_buf);
 
-	for (i = 0;
-	     readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
-		mdelay(1);
-
-	if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
+	for (i = 0; i < 50; i++) {
+		if (lpfc_readl((resp_buf + 1), &resp_data))
+			return;
+		if (resp_data != ~(BARRIER_TEST_PATTERN))
+			mdelay(1);
+		else
+			break;
+	}
+	resp_data = 0;
+	if (lpfc_readl((resp_buf + 1), &resp_data))
+		return;
+	if (resp_data != ~(BARRIER_TEST_PATTERN)) {
 		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
 		    phba->pport->stopped)
 			goto restore_hc;
@@ -3633,13 +3647,26 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
 	}
 
 	((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
-	for (i = 0; readl(resp_buf) != mbox &&  i < 500; i++)
-		mdelay(1);
+	resp_data = 0;
+	for (i = 0; i < 500; i++) {
+		if (lpfc_readl(resp_buf, &resp_data))
+			return;
+		if (resp_data != mbox)
+			mdelay(1);
+		else
+			break;
+	}
 
 clear_errat:
 
-	while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
-		mdelay(1);
+	while (++i < 500) {
+		if (lpfc_readl(phba->HAregaddr, &ha_copy))
+			return;
+		if (!(ha_copy & HA_ERATT))
+			mdelay(1);
+		else
+			break;
+	}
 
 	if (readl(phba->HAregaddr) & HA_ERATT) {
 		writel(HA_ERATT, phba->HAregaddr);
@@ -3686,7 +3713,11 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
 
 	/* Disable the error attention */
 	spin_lock_irq(&phba->hbalock);
-	status = readl(phba->HCregaddr);
+	if (lpfc_readl(phba->HCregaddr, &status)) {
+		spin_unlock_irq(&phba->hbalock);
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return 1;
+	}
 	status &= ~HC_ERINT_ENA;
 	writel(status, phba->HCregaddr);
 	readl(phba->HCregaddr); /* flush */
@@ -3720,11 +3751,12 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
 	 * 3 seconds we still set HBA_ERROR state because the status of the
 	 * board is now undefined.
 	 */
-	ha_copy = readl(phba->HAregaddr);
-
+	if (lpfc_readl(phba->HAregaddr, &ha_copy))
+		return 1;
 	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
 		mdelay(100);
-		ha_copy = readl(phba->HAregaddr);
+		if (lpfc_readl(phba->HAregaddr, &ha_copy))
+			return 1;
 	}
 
 	del_timer_sync(&psli->mbox_tmo);
@@ -4018,7 +4050,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
 	uint32_t status, i = 0;
 
 	/* Read the HBA Host Status Register */
-	status = readl(phba->HSregaddr);
+	if (lpfc_readl(phba->HSregaddr, &status))
+		return -EIO;
 
 	/* Check status register to see what current state is */
 	i = 0;
@@ -4073,7 +4106,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
 			lpfc_sli_brdrestart(phba);
 		}
 		/* Read the HBA Host Status Register */
-		status = readl(phba->HSregaddr);
+		if (lpfc_readl(phba->HSregaddr, &status))
+			return -EIO;
 	}
 
 	/* Check to see if any errors occurred during init */
@@ -5136,7 +5170,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
 	MAILBOX_t *mb;
 	struct lpfc_sli *psli = &phba->sli;
 	uint32_t status, evtctr;
-	uint32_t ha_copy;
+	uint32_t ha_copy, hc_copy;
 	int i;
 	unsigned long timeout;
 	unsigned long drvr_flag = 0;
@@ -5202,15 +5236,17 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
 		goto out_not_finished;
 	}
 
-	if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
-	    !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
-		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
-		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+	if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
+		if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
+			!(hc_copy & HC_MBINT_ENA)) {
+			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
 				"(%d):2528 Mailbox command x%x cannot "
 				"issue Data: x%x x%x\n",
 				pmbox->vport ? pmbox->vport->vpi : 0,
 				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
-		goto out_not_finished;
+			goto out_not_finished;
+		}
 	}
 
 	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
@@ -5408,11 +5444,19 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
 			word0 = le32_to_cpu(word0);
 		} else {
 			/* First read mbox status word */
-			word0 = readl(phba->MBslimaddr);
+			if (lpfc_readl(phba->MBslimaddr, &word0)) {
+				spin_unlock_irqrestore(&phba->hbalock,
+						       drvr_flag);
+				goto out_not_finished;
+			}
 		}
 
 		/* Read the HBA Host Attention Register */
-		ha_copy = readl(phba->HAregaddr);
+		if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
+			spin_unlock_irqrestore(&phba->hbalock,
+						       drvr_flag);
+			goto out_not_finished;
+		}
 		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
 							     mb->mbxCommand) *
 					   1000) + jiffies;
@@ -5463,7 +5507,11 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
 				word0 = readl(phba->MBslimaddr);
 			}
 			/* Read the HBA Host Attention Register */
-			ha_copy = readl(phba->HAregaddr);
+			if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
+				spin_unlock_irqrestore(&phba->hbalock,
+						       drvr_flag);
+				goto out_not_finished;
+			}
 		}
 
 		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
@@ -6263,7 +6311,6 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
 				bf_set(lpfc_sli4_sge_last, sgl, 1);
 			else
 				bf_set(lpfc_sli4_sge_last, sgl, 0);
-			sgl->word2 = cpu_to_le32(sgl->word2);
 			/* swap the size field back to the cpu so we
 			 * can assign it to the sgl.
 			 */
@@ -6283,6 +6330,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
 				bf_set(lpfc_sli4_sge_offset, sgl, offset);
 				offset += bde.tus.f.bdeSize;
 			}
+			sgl->word2 = cpu_to_le32(sgl->word2);
 			bpl++;
 			sgl++;
 		}
@@ -6528,9 +6576,9 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
 			sizeof(struct ulp_bde64);
 		for (i = 0; i < numBdes; i++) {
-			if (bpl[i].tus.f.bdeFlags != BUFF_TYPE_BDE_64)
-				break;
 			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
+			if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
+				break;
 			xmit_len += bde.tus.f.bdeSize;
 		}
 		/* word3 iocb=IO_TAG wqe=request_payload_len */
@@ -6620,15 +6668,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		xritag = 0;
 	break;
 	case CMD_XMIT_BLS_RSP64_CX:
-		/* As BLS ABTS-ACC WQE is very different from other WQEs,
+		/* As BLS ABTS RSP WQE is very different from other WQEs,
 		 * we re-construct this WQE here based on information in
 		 * iocbq from scratch.
 		 */
 		memset(wqe, 0, sizeof(union lpfc_wqe));
 		/* OX_ID is invariable to who sent ABTS to CT exchange */
 		bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
-		       bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc));
-		if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) ==
+		       bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
+		if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
 		    LPFC_ABTS_UNSOL_INT) {
 			/* ABTS sent by initiator to CT exchange, the
 			 * RX_ID field will be filled with the newly
@@ -6642,7 +6690,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 			 * RX_ID from ABTS.
 			 */
 			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
-			       bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc));
+			       bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
 		}
 		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
 		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
@@ -6653,6 +6701,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		       LPFC_WQE_LENLOC_NONE);
 		/* Overwrite the pre-set comnd type with OTHER_COMMAND */
 		command_type = OTHER_COMMAND;
+		if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
+			bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
+			       bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
+			bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
+			       bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
+			bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
+			       bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
+		}
+
 	break;
 	case CMD_XRI_ABORTED_CX:
 	case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
@@ -6701,7 +6758,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
 
 	if (piocb->sli4_xritag == NO_XRI) {
 		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
-		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
+		    piocb->iocb.ulpCommand == CMD_XMIT_BLS_RSP64_CX)
 			sglq = NULL;
 		else {
 			if (pring->txq_cnt) {
@@ -8194,7 +8252,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
 	piocb->iocb_flag &= ~LPFC_IO_WAKE;
 
 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
-		creg_val = readl(phba->HCregaddr);
+		if (lpfc_readl(phba->HCregaddr, &creg_val))
+			return IOCB_ERROR;
 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
 		writel(creg_val, phba->HCregaddr);
 		readl(phba->HCregaddr); /* flush */
@@ -8236,7 +8295,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
 	}
 
 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
-		creg_val = readl(phba->HCregaddr);
+		if (lpfc_readl(phba->HCregaddr, &creg_val))
+			return IOCB_ERROR;
 		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
 		writel(creg_val, phba->HCregaddr);
 		readl(phba->HCregaddr); /* flush */
@@ -8387,10 +8447,13 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
 	uint32_t ha_copy;
 
 	/* Read chip Host Attention (HA) register */
-	ha_copy = readl(phba->HAregaddr);
+	if (lpfc_readl(phba->HAregaddr, &ha_copy))
+		goto unplug_err;
+
 	if (ha_copy & HA_ERATT) {
 		/* Read host status register to retrieve error event */
-		lpfc_sli_read_hs(phba);
+		if (lpfc_sli_read_hs(phba))
+			goto unplug_err;
 
 		/* Check if there is a deferred error condition is active */
 		if ((HS_FFER1 & phba->work_hs) &&
@@ -8409,6 +8472,15 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
 		return 1;
 	}
 	return 0;
+
+unplug_err:
+	/* Set the driver HS work bitmap */
+	phba->work_hs |= UNPLUG_ERR;
+	/* Set the driver HA work bitmap */
+	phba->work_ha |= HA_ERATT;
+	/* Indicate polling handles this ERATT */
+	phba->hba_flag |= HBA_ERATT_HANDLED;
+	return 1;
 }
 
 /**
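
On a failed register read the error-attention pollers above no longer guess at hardware state: they record the condition and let the worker thread handle the synthetic attention. The shared bookkeeping:

unplug_err:
	/* report the failed readback as a handled error attention */
	phba->work_hs |= UNPLUG_ERR;
	phba->work_ha |= HA_ERATT;
	phba->hba_flag |= HBA_ERATT_HANDLED;
	return 1;
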
@@ -8436,8 +8508,15 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
 	switch (if_type) {
 	case LPFC_SLI_INTF_IF_TYPE_0:
-		uerr_sta_lo = readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
-		uerr_sta_hi = readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
+		if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
+			&uerr_sta_lo) ||
+			lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
+			&uerr_sta_hi)) {
+			phba->work_hs |= UNPLUG_ERR;
+			phba->work_ha |= HA_ERATT;
+			phba->hba_flag |= HBA_ERATT_HANDLED;
+			return 1;
+		}
 		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
 		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8456,9 +8535,15 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
 		}
 		break;
 	case LPFC_SLI_INTF_IF_TYPE_2:
-		portstat_reg.word0 =
-			readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
-		portsmphr = readl(phba->sli4_hba.PSMPHRregaddr);
+		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+			&portstat_reg.word0) ||
+			lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
+			&portsmphr)) {
+			phba->work_hs |= UNPLUG_ERR;
+			phba->work_ha |= HA_ERATT;
+			phba->hba_flag |= HBA_ERATT_HANDLED;
+			return 1;
+		}
 		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
 			phba->work_status[0] =
 				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
@@ -8639,7 +8724,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
 			return IRQ_NONE;
 		/* Need to read HA REG for slow-path events */
 		spin_lock_irqsave(&phba->hbalock, iflag);
-		ha_copy = readl(phba->HAregaddr);
+		if (lpfc_readl(phba->HAregaddr, &ha_copy))
+			goto unplug_error;
 		/* If somebody is waiting to handle an eratt don't process it
 		 * here. The brdkill function will do this.
 		 */
@@ -8665,7 +8751,9 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
 		}
 
 		/* Clear up only attention source related to slow-path */
-		hc_copy = readl(phba->HCregaddr);
+		if (lpfc_readl(phba->HCregaddr, &hc_copy))
+			goto unplug_error;
+
 		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
 			HC_LAINT_ENA | HC_ERINT_ENA),
 			phba->HCregaddr);
@@ -8688,7 +8776,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
 				 */
 				spin_lock_irqsave(&phba->hbalock, iflag);
 				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
-				control = readl(phba->HCregaddr);
+				if (lpfc_readl(phba->HCregaddr, &control))
+					goto unplug_error;
 				control &= ~HC_LAINT_ENA;
 				writel(control, phba->HCregaddr);
 				readl(phba->HCregaddr); /* flush */
@@ -8708,7 +8797,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
 			status >>= (4*LPFC_ELS_RING);
 			if (status & HA_RXMASK) {
 				spin_lock_irqsave(&phba->hbalock, iflag);
-				control = readl(phba->HCregaddr);
+				if (lpfc_readl(phba->HCregaddr, &control))
+					goto unplug_error;
 
 				lpfc_debugfs_slow_ring_trc(phba,
 				"ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
@@ -8741,7 +8831,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
 		}
 		spin_lock_irqsave(&phba->hbalock, iflag);
 		if (work_ha_copy & HA_ERATT) {
-			lpfc_sli_read_hs(phba);
+			if (lpfc_sli_read_hs(phba))
+				goto unplug_error;
 			/*
 			 * Check if there is a deferred error condition
 			 * is active
@@ -8872,6 +8963,9 @@ send_current_mbox:
 		lpfc_worker_wake_up(phba);
 	}
 	return IRQ_HANDLED;
+unplug_error:
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return IRQ_HANDLED;
 
 } /* lpfc_sli_sp_intr_handler */
 
@@ -8919,7 +9013,8 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id)
 		if (lpfc_intr_state_check(phba))
 			return IRQ_NONE;
 		/* Need to read HA REG for FCP ring and other ring events */
-		ha_copy = readl(phba->HAregaddr);
+		if (lpfc_readl(phba->HAregaddr, &ha_copy))
+			return IRQ_HANDLED;
 		/* Clear up only attention source related to fast-path */
 		spin_lock_irqsave(&phba->hbalock, iflag);
 		/*
@@ -9004,7 +9099,11 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
 		return IRQ_NONE;
 
 	spin_lock(&phba->hbalock);
-	phba->ha_copy = readl(phba->HAregaddr);
+	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
+		spin_unlock(&phba->hbalock);
+		return IRQ_HANDLED;
+	}
+
 	if (unlikely(!phba->ha_copy)) {
 		spin_unlock(&phba->hbalock);
 		return IRQ_NONE;
@@ -9026,7 +9125,10 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
 	}
 
 	/* Clear attention sources except link and error attentions */
-	hc_copy = readl(phba->HCregaddr);
+	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
+		spin_unlock(&phba->hbalock);
+		return IRQ_HANDLED;
+	}
 	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
 		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
 		phba->HCregaddr);
@@ -10403,7 +10505,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
 	if (!phba->sli4_hba.pc_sli4_params.supported)
 		hw_page_size = SLI4_PAGE_SIZE;
 
-
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox)
 		return -ENOMEM;
@@ -10413,11 +10514,22 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
 			 LPFC_MBOX_OPCODE_CQ_CREATE,
 			 length, LPFC_SLI4_MBX_EMBED);
 	cq_create = &mbox->u.mqe.un.cq_create;
+	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
 	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
 		    cq->page_count);
 	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
 	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
-	bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id);
+	bf_set(lpfc_mbox_hdr_version, &shdr->request,
+	       phba->sli4_hba.pc_sli4_params.cqv);
+	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
+		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
+		       (PAGE_SIZE/SLI4_PAGE_SIZE));
+		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
+		       eq->queue_id);
+	} else {
+		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
+		       eq->queue_id);
+	}
 	switch (cq->entry_count) {
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -10449,7 +10561,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 
 	/* The IOCTL status is embedded in the mailbox subheader. */
-	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 	if (shdr_status || shdr_add_status || rc) {
@@ -10515,20 +10626,20 @@ lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
 	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
 	switch (mq->entry_count) {
 	case 16:
-		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
-		       LPFC_MQ_CNT_16);
+		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+		       LPFC_MQ_RING_SIZE_16);
 		break;
 	case 32:
-		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
-		       LPFC_MQ_CNT_32);
+		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+		       LPFC_MQ_RING_SIZE_32);
 		break;
 	case 64:
-		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
-		       LPFC_MQ_CNT_64);
+		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+		       LPFC_MQ_RING_SIZE_64);
 		break;
 	case 128:
-		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
-		       LPFC_MQ_CNT_128);
+		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+		       LPFC_MQ_RING_SIZE_128);
 		break;
 	}
 	list_for_each_entry(dmabuf, &mq->page_list, list) {
@@ -10586,6 +10697,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
 			 length, LPFC_SLI4_MBX_EMBED);
 
 	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
+	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
 	bf_set(lpfc_mbx_mq_create_ext_num_pages,
 	       &mq_create_ext->u.request, mq->page_count);
 	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
@@ -10598,9 +10710,15 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
 	       &mq_create_ext->u.request, 1);
 	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
 	       &mq_create_ext->u.request, 1);
-	bf_set(lpfc_mq_context_cq_id,
-	       &mq_create_ext->u.request.context, cq->queue_id);
 	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
+	bf_set(lpfc_mbox_hdr_version, &shdr->request,
+	       phba->sli4_hba.pc_sli4_params.mqv);
+	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
+		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
+		       cq->queue_id);
+	else
+		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
+		       cq->queue_id);
 	switch (mq->entry_count) {
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -10610,20 +10728,24 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
 			return -EINVAL;
 		/* otherwise default to smallest count (drop through) */
 	case 16:
-		bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
-		       LPFC_MQ_CNT_16);
+		bf_set(lpfc_mq_context_ring_size,
+		       &mq_create_ext->u.request.context,
+		       LPFC_MQ_RING_SIZE_16);
 		break;
 	case 32:
-		bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
-		       LPFC_MQ_CNT_32);
+		bf_set(lpfc_mq_context_ring_size,
+		       &mq_create_ext->u.request.context,
+		       LPFC_MQ_RING_SIZE_32);
 		break;
 	case 64:
-		bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
-		       LPFC_MQ_CNT_64);
+		bf_set(lpfc_mq_context_ring_size,
+		       &mq_create_ext->u.request.context,
+		       LPFC_MQ_RING_SIZE_64);
 		break;
 	case 128:
-		bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
-		       LPFC_MQ_CNT_128);
+		bf_set(lpfc_mq_context_ring_size,
+		       &mq_create_ext->u.request.context,
+		       LPFC_MQ_RING_SIZE_128);
 		break;
 	}
 	list_for_each_entry(dmabuf, &mq->page_list, list) {
@@ -10634,7 +10756,6 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
 					putPaddrHigh(dmabuf->phys);
 	}
 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
-	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
 	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
 			      &mq_create_ext->u.response);
 	if (rc != MBX_SUCCESS) {
@@ -10711,6 +10832,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
 	uint32_t shdr_status, shdr_add_status;
 	union lpfc_sli4_cfg_shdr *shdr;
 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+	struct dma_address *page;
 
 	if (!phba->sli4_hba.pc_sli4_params.supported)
 		hw_page_size = SLI4_PAGE_SIZE;
@@ -10724,20 +10846,42 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
 			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
 			 length, LPFC_SLI4_MBX_EMBED);
 	wq_create = &mbox->u.mqe.un.wq_create;
+	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
 	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
 		    wq->page_count);
 	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
 		    cq->queue_id);
+	bf_set(lpfc_mbox_hdr_version, &shdr->request,
+	       phba->sli4_hba.pc_sli4_params.wqv);
+	if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
+		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
+		       wq->entry_count);
+		switch (wq->entry_size) {
+		default:
+		case 64:
+			bf_set(lpfc_mbx_wq_create_wqe_size,
+			       &wq_create->u.request_1,
+			       LPFC_WQ_WQE_SIZE_64);
+			break;
+		case 128:
+			bf_set(lpfc_mbx_wq_create_wqe_size,
+			       &wq_create->u.request_1,
+			       LPFC_WQ_WQE_SIZE_128);
+			break;
+		}
+		bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
+		       (PAGE_SIZE/SLI4_PAGE_SIZE));
+		page = wq_create->u.request_1.page;
+	} else {
+		page = wq_create->u.request.page;
+	}
 	list_for_each_entry(dmabuf, &wq->page_list, list) {
 		memset(dmabuf->virt, 0, hw_page_size);
-		wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
-					putPaddrLow(dmabuf->phys);
-		wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
-					putPaddrHigh(dmabuf->phys);
+		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
+		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
 	}
 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 	/* The IOCTL status is embedded in the mailbox subheader. */
-	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 	if (shdr_status || shdr_add_status || rc) {
@@ -10815,37 +10959,51 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
 			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
 			 length, LPFC_SLI4_MBX_EMBED);
 	rq_create = &mbox->u.mqe.un.rq_create;
-	switch (hrq->entry_count) {
-	default:
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"2535 Unsupported RQ count. (%d)\n",
-				hrq->entry_count);
-		if (hrq->entry_count < 512)
-			return -EINVAL;
-		/* otherwise default to smallest count (drop through) */
-	case 512:
-		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
-		       LPFC_RQ_RING_SIZE_512);
-		break;
-	case 1024:
-		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
-		       LPFC_RQ_RING_SIZE_1024);
-		break;
-	case 2048:
-		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
-		       LPFC_RQ_RING_SIZE_2048);
-		break;
-	case 4096:
-		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
-		       LPFC_RQ_RING_SIZE_4096);
-		break;
+	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
+	bf_set(lpfc_mbox_hdr_version, &shdr->request,
+	       phba->sli4_hba.pc_sli4_params.rqv);
+	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
+		bf_set(lpfc_rq_context_rqe_count_1,
+		       &rq_create->u.request.context,
+		       hrq->entry_count);
+		rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
+	} else {
+		switch (hrq->entry_count) {
+		default:
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2535 Unsupported RQ count. (%d)\n",
+					hrq->entry_count);
+			if (hrq->entry_count < 512)
+				return -EINVAL;
+			/* otherwise default to smallest count (drop through) */
+		case 512:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_512);
+			break;
+		case 1024:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_1024);
+			break;
+		case 2048:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_2048);
+			break;
+		case 4096:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_4096);
+			break;
+		}
+		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
+		       LPFC_HDR_BUF_SIZE);
 	}
 	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
 	       cq->queue_id);
 	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
 	       hrq->page_count);
-	bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
-	       LPFC_HDR_BUF_SIZE);
 	list_for_each_entry(dmabuf, &hrq->page_list, list) {
 		memset(dmabuf->virt, 0, hw_page_size);
 		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
@@ -10855,7 +11013,6 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
 	}
 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 	/* The IOCTL status is embedded in the mailbox subheader. */
-	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 	if (shdr_status || shdr_add_status || rc) {
@@ -10881,37 +11038,50 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
 			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
 			 length, LPFC_SLI4_MBX_EMBED);
-	switch (drq->entry_count) {
-	default:
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"2536 Unsupported RQ count. (%d)\n",
-				drq->entry_count);
-		if (drq->entry_count < 512)
-			return -EINVAL;
-		/* otherwise default to smallest count (drop through) */
-	case 512:
-		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
-		       LPFC_RQ_RING_SIZE_512);
-		break;
-	case 1024:
-		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
-		       LPFC_RQ_RING_SIZE_1024);
-		break;
-	case 2048:
-		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
-		       LPFC_RQ_RING_SIZE_2048);
-		break;
-	case 4096:
-		bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
-		       LPFC_RQ_RING_SIZE_4096);
-		break;
+	bf_set(lpfc_mbox_hdr_version, &shdr->request,
+	       phba->sli4_hba.pc_sli4_params.rqv);
+	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
+		bf_set(lpfc_rq_context_rqe_count_1,
+		       &rq_create->u.request.context,
+		       hrq->entry_count);
+		rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
+	} else {
+		switch (drq->entry_count) {
+		default:
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2536 Unsupported RQ count. (%d)\n",
+					drq->entry_count);
+			if (drq->entry_count < 512)
+				return -EINVAL;
+			/* otherwise default to smallest count (drop through) */
+		case 512:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_512);
+			break;
+		case 1024:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_1024);
+			break;
+		case 2048:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_2048);
+			break;
+		case 4096:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_4096);
+			break;
+		}
+		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
+		       LPFC_DATA_BUF_SIZE);
 	}
 	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
 	       cq->queue_id);
 	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
 	       drq->page_count);
-	bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
-	       LPFC_DATA_BUF_SIZE);
 	list_for_each_entry(dmabuf, &drq->page_list, list) {
 		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
 					putPaddrLow(dmabuf->phys);
@@ -11580,6 +11750,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
 	static char *rctl_names[] = FC_RCTL_NAMES_INIT;
 	char *type_names[] = FC_TYPE_NAMES_INIT;
 	struct fc_vft_header *fc_vft_hdr;
+	uint32_t *header = (uint32_t *) fc_hdr;
 
 	switch (fc_hdr->fh_r_ctl) {
 	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
@@ -11628,10 +11799,15 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
 	default:
 		goto drop;
 	}
+
 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-			"2538 Received frame rctl:%s type:%s\n",
+			"2538 Received frame rctl:%s type:%s "
+			"Frame Data:%08x %08x %08x %08x %08x %08x\n",
 			rctl_names[fc_hdr->fh_r_ctl],
-			type_names[fc_hdr->fh_type]);
+			type_names[fc_hdr->fh_type],
+			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
+			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
+			be32_to_cpu(header[4]), be32_to_cpu(header[5]));
 	return 0;
 drop:
 	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
@@ -11928,17 +12104,17 @@ lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
 }
 
 /**
- * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler
+ * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
  * @phba: Pointer to HBA context object.
  * @cmd_iocbq: pointer to the command iocbq structure.
  * @rsp_iocbq: pointer to the response iocbq structure.
  *
- * This function handles the sequence abort accept iocb command complete
+ * This function handles the sequence abort response iocb command complete
  * event. It properly releases the memory allocated to the sequence abort
  * accept iocb.
  **/
 static void
-lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba,
+lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
 			     struct lpfc_iocbq *cmd_iocbq,
 			     struct lpfc_iocbq *rsp_iocbq)
 {
@@ -11947,15 +12123,15 @@ lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba,
 }
 
 /**
- * lpfc_sli4_seq_abort_acc - Accept sequence abort
+ * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
  * @phba: Pointer to HBA context object.
  * @fc_hdr: pointer to a FC frame header.
  *
- * This function sends a basic accept to a previous unsol sequence abort
+ * This function sends a basic response to a previous unsol sequence abort
  * event after aborting the sequence handling.
  **/
 static void
-lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
+lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
 			struct fc_frame_header *fc_hdr)
 {
 	struct lpfc_iocbq *ctiocb = NULL;
@@ -11963,6 +12139,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
 	uint16_t oxid, rxid;
 	uint32_t sid, fctl;
 	IOCB_t *icmd;
+	int rc;
 
 	if (!lpfc_is_link_up(phba))
 		return;
@@ -11983,7 +12160,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
 		+ phba->sli4_hba.max_cfg_param.xri_base))
 		lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
 
-	/* Allocate buffer for acc iocb */
+	/* Allocate buffer for rsp iocb */
 	ctiocb = lpfc_sli_get_iocbq(phba);
 	if (!ctiocb)
 		return;
@@ -12008,32 +12185,54 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
 
 	ctiocb->iocb_cmpl = NULL;
 	ctiocb->vport = phba->pport;
-	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl;
+	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
+	ctiocb->sli4_xritag = NO_XRI;
+
+	/* If the oxid maps to the FCP XRI range or if it is out of range,
+	 * send a BLS_RJT.  The driver no longer has that exchange.
+	 * Override the IOCB for a BA_RJT.
+	 */
+	if (oxid > (phba->sli4_hba.max_cfg_param.max_xri +
+		    phba->sli4_hba.max_cfg_param.xri_base) ||
+	    oxid > (lpfc_sli4_get_els_iocb_cnt(phba) +
+		    phba->sli4_hba.max_cfg_param.xri_base)) {
+		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
+		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
+		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
+		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
+	}
 
 	if (fctl & FC_FC_EX_CTX) {
 		/* ABTS sent by responder to CT exchange, construction
 		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
 		 * field and RX_ID from ABTS for RX_ID field.
 		 */
-		bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP);
-		bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid);
-		ctiocb->sli4_xritag = oxid;
+		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
+		bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
 	} else {
 		/* ABTS sent by initiator to CT exchange, construction
 		 * of BA_ACC will need to allocate a new XRI as for the
 		 * XRI_TAG and RX_ID fields.
 		 */
-		bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT);
-		bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI);
-		ctiocb->sli4_xritag = NO_XRI;
+		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
+		bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI);
 	}
-	bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid);
+	bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
 
-	/* Xmit CT abts accept on exchange <xid> */
+	/* Xmit CT abts response on exchange <xid> */
 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-			"1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n",
-			CMD_XMIT_BLS_RSP64_CX, phba->link_state);
-	lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
+			"1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
+			icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
+
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
+	if (rc == IOCB_ERROR) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+				"2925 Failed to issue CT ABTS RSP x%x on "
+				"xri x%x, Data x%x\n",
+				icmd->un.xseq64.w5.hcsw.Rctl, oxid,
+				phba->link_state);
+		lpfc_sli_release_iocbq(phba, ctiocb);
+	}
 }
 
 /**
@@ -12081,7 +12280,7 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
 			lpfc_in_buf_free(phba, &dmabuf->dbuf);
 	}
 	/* Send basic accept (BA_ACC) to the abort requester */
-	lpfc_sli4_seq_abort_acc(phba, &fc_hdr);
+	lpfc_sli4_seq_abort_rsp(phba, &fc_hdr);
 }
 
 /**
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 595056b89608..1a3cbf88f2ce 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2009 Emulex.  All rights reserved.                *
+ * Copyright (C) 2009-2011 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 0a4d376dbca5..2404d1d65563 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.21"
+#define LPFC_DRIVER_VERSION "8.3.22"
 #define LPFC_DRIVER_NAME		"lpfc"
 #define LPFC_SP_DRIVER_HANDLER_NAME	"lpfc:sp"
 #define LPFC_FP_DRIVER_HANDLER_NAME	"lpfc:fp"
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index e8a6f1cf1e4b..5e001ffd4c13 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -1747,6 +1747,54 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
 	}
 }
 
+/**
+ * _base_display_hp_branding - Display branding string
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_display_hp_branding(struct MPT2SAS_ADAPTER *ioc)
+{
+	if (ioc->pdev->subsystem_vendor != MPT2SAS_HP_3PAR_SSVID)
+		return;
+
+	switch (ioc->pdev->device) {
+	case MPI2_MFGPAGE_DEVID_SAS2004:
+		switch (ioc->pdev->subsystem_device) {
+		case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
+			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+			    MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
+			break;
+		default:
+			break;
+		}
+		break;
+	case MPI2_MFGPAGE_DEVID_SAS2308_2:
+		switch (ioc->pdev->subsystem_device) {
+		case MPT2SAS_HP_2_4_INTERNAL_SSDID:
+			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+			    MPT2SAS_HP_2_4_INTERNAL_BRANDING);
+			break;
+		case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
+			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+			    MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
+			break;
+		case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
+			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+			    MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
+			break;
+		case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
+			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+			    MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+}
+
 /**
 * _base_display_ioc_capabilities - Display IOC's capabilities.
  * @ioc: per adapter object
@@ -1778,6 +1826,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
 
 	_base_display_dell_branding(ioc);
 	_base_display_intel_branding(ioc);
+	_base_display_hp_branding(ioc);
 
 	printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name);
 
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index a3f8aa9baea4..500328245f61 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -168,6 +168,26 @@
 #define MPT2SAS_INTEL_RMS2LL080_SSDID          0x350E
 #define MPT2SAS_INTEL_RMS2LL040_SSDID          0x350F
 
+
+/*
+ * HP HBA branding
+ */
+#define MPT2SAS_HP_3PAR_SSVID                0x1590
+#define MPT2SAS_HP_2_4_INTERNAL_BRANDING        "HP H220 Host Bus Adapter"
+#define MPT2SAS_HP_2_4_EXTERNAL_BRANDING        "HP H221 Host Bus Adapter"
+#define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING "HP H222 Host Bus Adapter"
+#define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING    "HP H220i Host Bus Adapter"
+#define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING    "HP H210i Host Bus Adapter"
+
+/*
+ * HP HBA SSDIDs
+ */
+#define MPT2SAS_HP_2_4_INTERNAL_SSDID            0x0041
+#define MPT2SAS_HP_2_4_EXTERNAL_SSDID            0x0042
+#define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID    0x0043
+#define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID        0x0044
+#define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID        0x0046
+
 /*
  * per target private data
  */
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 19ad34f381a5..938d045e4180 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -663,6 +663,13 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = {
 	{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
 	{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
 	{ PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
+	{ PCI_VDEVICE(TTI, 0x2710), chip_9480 },
+	{ PCI_VDEVICE(TTI, 0x2720), chip_9480 },
+	{ PCI_VDEVICE(TTI, 0x2721), chip_9480 },
+	{ PCI_VDEVICE(TTI, 0x2722), chip_9480 },
+	{ PCI_VDEVICE(TTI, 0x2740), chip_9480 },
+	{ PCI_VDEVICE(TTI, 0x2744), chip_9480 },
+	{ PCI_VDEVICE(TTI, 0x2760), chip_9480 },
 
 	{ }	/* terminate list */
 };
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 2fc0045b1a52..c1f8d1b150f7 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -53,6 +53,9 @@
 #define PCI_DEVICE_ID_QLOGIC_ISP8022	0x8022
 #endif
 
+#define ISP4XXX_PCI_FN_1	0x1
+#define ISP4XXX_PCI_FN_2	0x3
+
 #define QLA_SUCCESS			0
 #define QLA_ERROR			1
 
@@ -233,9 +236,6 @@ struct ddb_entry {
 
 	unsigned long flags;	/* DDB Flags */
 
-	unsigned long dev_scan_wait_to_start_relogin;
-	unsigned long dev_scan_wait_to_complete_relogin;
-
 	uint16_t fw_ddb_index;	/* DDB firmware index */
 	uint16_t options;
 	uint32_t fw_ddb_device_state; /* F/W Device State  -- see ql4_fw.h */
@@ -289,8 +289,6 @@ struct ddb_entry {
  * DDB flags.
  */
 #define DF_RELOGIN		0	/* Relogin to device */
-#define DF_NO_RELOGIN		1	/* Do not relogin if IOCTL
-					 * logged it out */
 #define DF_ISNS_DISCOVERED	2	/* Device was discovered via iSNS */
 #define DF_FO_MASKED		3
 
@@ -376,7 +374,7 @@ struct scsi_qla_host {
 #define AF_LINK_UP			8 /* 0x00000100 */
 #define AF_IRQ_ATTACHED			10 /* 0x00000400 */
 #define AF_DISABLE_ACB_COMPLETE		11 /* 0x00000800 */
-#define AF_HBA_GOING_AWAY		12 /* 0x00001000 */
+#define AF_HA_REMOVAL			12 /* 0x00001000 */
 #define AF_INTx_ENABLED			15 /* 0x00008000 */
 #define AF_MSI_ENABLED			16 /* 0x00010000 */
 #define AF_MSIX_ENABLED			17 /* 0x00020000 */
@@ -479,7 +477,6 @@ struct scsi_qla_host {
 	uint32_t timer_active;
 
 	/* Recovery Timers */
-	uint32_t discovery_wait;
 	atomic_t check_relogin_timeouts;
 	uint32_t retry_reset_ha_cnt;
 	uint32_t isp_reset_timer;	/* reset test timer */
@@ -765,6 +762,5 @@ static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a)
 /* Defines for process_aen() */
 #define PROCESS_ALL_AENS	 0
 #define FLUSH_DDB_CHANGED_AENS	 1
-#define RELOGIN_DDB_CHANGED_AENS 2
 
 #endif	/*_QLA4XXX_H */
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index c1985792f034..31e2bf97198c 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -455,6 +455,7 @@ struct addr_ctrl_blk {
 	uint8_t res0;	/* 07 */
 	uint16_t eth_mtu_size;	/* 08-09 */
 	uint16_t add_fw_options;	/* 0A-0B */
+#define SERIALIZE_TASK_MGMT		0x0400
 
 	uint8_t hb_interval;	/* 0C */
 	uint8_t inst_num; /* 0D */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 8fad99b7eef4..cc53e3fbd78c 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -136,7 +136,6 @@ void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha);
 void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha);
 
 extern int ql4xextended_error_logging;
-extern int ql4xdiscoverywait;
 extern int ql4xdontresethba;
 extern int ql4xenablemsix;
 
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 1629c48c35ef..bbb2e903d38a 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -723,13 +723,38 @@ int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err)
 	return relogin;
 }
 
+static void qla4xxx_flush_AENS(struct scsi_qla_host *ha)
+{
+	unsigned long wtime;
+
+	/* Flush the 0x8014 AEN from the firmware as a result of
+	 * Auto connect. We are basically doing get_firmware_ddb()
+	 * to determine whether we need to log back in or not.
+	 * Trying to do a set ddb before we have processed 0x8014
+	 * will result in another set_ddb() for the same ddb. In other
+	 * words there will be stale entries in the aen_q.
+	 */
+	wtime = jiffies + (2 * HZ);
+	do {
+		if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS)
+			if (ha->firmware_state & (BIT_2 | BIT_0))
+				return;
+
+		if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
+			qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
+
+		msleep(1000);
+	} while (!time_after_eq(jiffies, wtime));
+}
+
 /**
- * qla4xxx_configure_ddbs - builds driver ddb list
+ * qla4xxx_build_ddb_list - builds driver ddb list
  * @ha: Pointer to host adapter structure.
  *
  * This routine searches for all valid firmware ddb entries and builds
  * an internal ddb list. Ddbs that are considered valid are those with
  * a device state of SESSION_ACTIVE.
+ * A relogin (set_ddb) is issued for DDBs that are not online.
  **/
 static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
 {
@@ -744,6 +769,8 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
 	uint32_t ipv6_device;
 	uint32_t new_tgt;
 
+	qla4xxx_flush_AENS(ha);
+
 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
 			&fw_ddb_entry_dma, GFP_KERNEL);
 	if (fw_ddb_entry == NULL) {
@@ -847,144 +874,6 @@ exit_build_ddb_list_no_free:
 	return status;
 }
 
-struct qla4_relog_scan {
-	int halt_wait;
-	uint32_t conn_err;
-	uint32_t fw_ddb_index;
-	uint32_t next_fw_ddb_index;
-	uint32_t fw_ddb_device_state;
-};
-
-static int qla4_test_rdy(struct scsi_qla_host *ha, struct qla4_relog_scan *rs)
-{
-	struct ddb_entry *ddb_entry;
-
-	if (qla4_is_relogin_allowed(ha, rs->conn_err)) {
-		/* We either have a device that is in
-		 * the process of relogging in or a
-		 * device that is waiting to be
-		 * relogged in */
-		rs->halt_wait = 0;
-
-		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha,
-							   rs->fw_ddb_index);
-		if (ddb_entry == NULL)
-			return QLA_ERROR;
-
-		if (ddb_entry->dev_scan_wait_to_start_relogin != 0
-		    && time_after_eq(jiffies,
-				     ddb_entry->
-				     dev_scan_wait_to_start_relogin))
-		{
-			ddb_entry->dev_scan_wait_to_start_relogin = 0;
-			qla4xxx_set_ddb_entry(ha, rs->fw_ddb_index, 0);
-		}
-	}
-	return QLA_SUCCESS;
-}
-
-static int qla4_scan_for_relogin(struct scsi_qla_host *ha,
-				 struct qla4_relog_scan *rs)
-{
-	int error;
-
-	/* scan for relogins
-	 * ----------------- */
-	for (rs->fw_ddb_index = 0; rs->fw_ddb_index < MAX_DDB_ENTRIES;
-	     rs->fw_ddb_index = rs->next_fw_ddb_index) {
-		if (qla4xxx_get_fwddb_entry(ha, rs->fw_ddb_index, NULL, 0,
-					    NULL, &rs->next_fw_ddb_index,
-					    &rs->fw_ddb_device_state,
-					    &rs->conn_err, NULL, NULL)
-		    == QLA_ERROR)
-			return QLA_ERROR;
-
-		if (rs->fw_ddb_device_state == DDB_DS_LOGIN_IN_PROCESS)
-			rs->halt_wait = 0;
-
-		if (rs->fw_ddb_device_state == DDB_DS_SESSION_FAILED ||
-		    rs->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE) {
-			error = qla4_test_rdy(ha, rs);
-			if (error)
-				return error;
-		}
-
-		/* We know we've reached the last device when
-		 * next_fw_ddb_index is 0 */
-		if (rs->next_fw_ddb_index == 0)
-			break;
-	}
-	return QLA_SUCCESS;
-}
-
-/**
- * qla4xxx_devices_ready - wait for target devices to be logged in
- * @ha: pointer to adapter structure
- *
- * This routine waits up to ql4xdiscoverywait seconds
- * F/W database during driver load time.
- **/
-static int qla4xxx_devices_ready(struct scsi_qla_host *ha)
-{
-	int error;
-	unsigned long discovery_wtime;
-	struct qla4_relog_scan rs;
-
-	discovery_wtime = jiffies + (ql4xdiscoverywait * HZ);
-
-	DEBUG(printk("Waiting (%d) for devices ...\n", ql4xdiscoverywait));
-	do {
-		/* poll for AEN. */
-		qla4xxx_get_firmware_state(ha);
-		if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) {
-			/* Set time-between-relogin timer */
-			qla4xxx_process_aen(ha, RELOGIN_DDB_CHANGED_AENS);
-		}
-
-		/* if no relogins active or needed, halt discvery wait */
-		rs.halt_wait = 1;
-
-		error = qla4_scan_for_relogin(ha, &rs);
-
-		if (rs.halt_wait) {
-			DEBUG2(printk("scsi%ld: %s: Delay halted.  Devices "
-				      "Ready.\n", ha->host_no, __func__));
-			return QLA_SUCCESS;
-		}
-
-		msleep(2000);
-	} while (!time_after_eq(jiffies, discovery_wtime));
-
-	DEBUG3(qla4xxx_get_conn_event_log(ha));
-
-	return QLA_SUCCESS;
-}
-
-static void qla4xxx_flush_AENS(struct scsi_qla_host *ha)
-{
-	unsigned long wtime;
-
-	/* Flush the 0x8014 AEN from the firmware as a result of
-	 * Auto connect. We are basically doing get_firmware_ddb()
-	 * to determine whether we need to log back in or not.
-	 *  Trying to do a set ddb before we have processed 0x8014
-	 *  will result in another set_ddb() for the same ddb. In other
-	 *  words there will be stale entries in the aen_q.
-	 */
-	wtime = jiffies + (2 * HZ);
-	do {
-		if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS)
-			if (ha->firmware_state & (BIT_2 | BIT_0))
-				return;
-
-		if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
-			qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
-
-		msleep(1000);
-	} while (!time_after_eq(jiffies, wtime));
-
-}
-
 static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha)
 {
 	uint16_t fw_ddb_index;
@@ -996,29 +885,12 @@ static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha)
 
 	for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; fw_ddb_index++)
 		ha->fw_ddb_index_map[fw_ddb_index] =
-			(struct ddb_entry *)INVALID_ENTRY;
+		    (struct ddb_entry *)INVALID_ENTRY;
 
 	ha->tot_ddbs = 0;
 
-	qla4xxx_flush_AENS(ha);
-
-	/* Wait for an AEN */
-	qla4xxx_devices_ready(ha);
-
-	/*
-	 * First perform device discovery for active
-	 * fw ddb indexes and build
-	 * ddb list.
-	 */
-	if ((status = qla4xxx_build_ddb_list(ha)) == QLA_ERROR)
-		return status;
-
-	/*
-	 * Targets can come online after the inital discovery, so processing
-	 * the aens here will catch them.
-	 */
-	if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
-		qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
+	/* Perform device discovery and build ddb list. */
+	status = qla4xxx_build_ddb_list(ha);
 
 	return status;
 }
@@ -1537,7 +1409,6 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
 		uint32_t state, uint32_t conn_err)
 {
 	struct ddb_entry * ddb_entry;
-	uint32_t old_fw_ddb_device_state;
 
 	/* check for out of range index */
 	if (fw_ddb_index >= MAX_DDB_ENTRIES)
@@ -1553,27 +1424,18 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
 	}
 
 	/* Device already exists in our database. */
-	old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
 	DEBUG2(printk("scsi%ld: %s DDB - old state= 0x%x, new state=0x%x for "
 		      "index [%d]\n", ha->host_no, __func__,
 		      ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
-	if (old_fw_ddb_device_state == state &&
-	    state == DDB_DS_SESSION_ACTIVE) {
-		if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
-			atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
-			iscsi_unblock_session(ddb_entry->sess);
-		}
-		return QLA_SUCCESS;
-	}
 
 	ddb_entry->fw_ddb_device_state = state;
 	/* Device is back online. */
-	if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
+	if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
+	   (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
 		atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
 		atomic_set(&ddb_entry->relogin_retry_count, 0);
 		atomic_set(&ddb_entry->relogin_timer, 0);
 		clear_bit(DF_RELOGIN, &ddb_entry->flags);
-		clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
 		iscsi_unblock_session(ddb_entry->sess);
 		iscsi_session_event(ddb_entry->sess,
 				    ISCSI_KEVENT_CREATE_SESSION);
@@ -1581,7 +1443,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
 		 * Change the lun state to READY in case the lun TIMEOUT before
 		 * the device came back.
 		 */
-	} else {
+	} else if (ddb_entry->fw_ddb_device_state != DDB_DS_SESSION_ACTIVE) {
 		/* Device went away, mark device missing */
 		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) {
 			DEBUG2(ql4_printk(KERN_INFO, ha, "%s mark missing "
@@ -1598,7 +1460,6 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
 		 */
 		if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED &&
 		    !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
-		    !test_bit(DF_NO_RELOGIN, &ddb_entry->flags) &&
 		    qla4_is_relogin_allowed(ha, conn_err)) {
 			/*
 			 * This triggers a relogin.  After the relogin_timer
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 03e028e6e809..2f40ac761cd4 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -801,7 +801,7 @@ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
 			       &ha->reg->ctrl_status);
 			readl(&ha->reg->ctrl_status);
 
-			if (!test_bit(AF_HBA_GOING_AWAY, &ha->flags))
+			if (!test_bit(AF_HA_REMOVAL, &ha->flags))
 				set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
 
 			break;
@@ -1008,34 +1008,9 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
 					      mbox_sts[0], mbox_sts[2],
 					      mbox_sts[3]));
 				break;
-			} else if (process_aen == RELOGIN_DDB_CHANGED_AENS) {
-				/* for use during init time, we only want to
-				 * relogin non-active ddbs */
-				struct ddb_entry *ddb_entry;
-
-				ddb_entry =
-					/* FIXME: name length? */
-					qla4xxx_lookup_ddb_by_fw_index(ha,
-								       mbox_sts[2]);
-				if (!ddb_entry)
-					break;
-
-				ddb_entry->dev_scan_wait_to_complete_relogin =
-					0;
-				ddb_entry->dev_scan_wait_to_start_relogin =
-					jiffies +
-					((ddb_entry->default_time2wait +
-					  4) * HZ);
-
-				DEBUG2(printk("scsi%ld: ddb [%d] initiate"
-					      " RELOGIN after %d seconds\n",
-					      ha->host_no,
-					      ddb_entry->fw_ddb_index,
-					      ddb_entry->default_time2wait +
-					      4));
-				break;
 			}
-
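+			/* fall through */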
+		case PROCESS_ALL_AENS:
+		default:
 			if (mbox_sts[1] == 0) {	/* Global DB change. */
 				qla4xxx_reinitialize_ddb_list(ha);
 			} else if (mbox_sts[1] == 1) {	/* Specific device. */
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index f65626aec7c1..f9d81c8372c3 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -32,6 +32,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
 	u_long wait_count;
 	uint32_t intr_status;
 	unsigned long flags = 0;
+	uint32_t dev_state;
 
 	/* Make sure that pointers are valid */
 	if (!mbx_cmd || !mbx_sts) {
@@ -40,12 +41,23 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
 		return status;
 	}
 
-	if (is_qla8022(ha) &&
-	    test_bit(AF_FW_RECOVERY, &ha->flags)) {
-		DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: prematurely "
-		    "completing mbx cmd as firmware recovery detected\n",
-		    ha->host_no, __func__));
-		return status;
+	if (is_qla8022(ha)) {
+		if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
+			DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
+			    "prematurely completing mbx cmd as firmware "
+			    "recovery detected\n", ha->host_no, __func__));
+			return status;
+		}
+		/* Do not send any mbx cmd if h/w is in failed state */
+		qla4_8xxx_idc_lock(ha);
+		dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+		qla4_8xxx_idc_unlock(ha);
+		if (dev_state == QLA82XX_DEV_FAILED) {
+			ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: H/W is in "
+			    "failed state, do not send any mailbox commands\n",
+			    ha->host_no, __func__);
+			return status;
+		}
 	}
 
 	if ((is_aer_supported(ha)) &&
@@ -139,7 +151,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
 	if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
 	    test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
 	    test_bit(AF_ONLINE, &ha->flags) &&
-	    !test_bit(AF_HBA_GOING_AWAY, &ha->flags)) {
+	    !test_bit(AF_HA_REMOVAL, &ha->flags)) {
 		/* Do not poll for completion. Use completion queue */
 		set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
 		wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ);
@@ -395,9 +407,6 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
 	/*memcpy(ha->alias, init_fw_cb->Alias,
 	       min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
 
-	/* Save Command Line Paramater info */
-	ha->discovery_wait = ql4xdiscoverywait;
-
 	if (ha->acb_version == ACB_SUPPORTED) {
 		ha->ipv6_options = init_fw_cb->ipv6_opts;
 		ha->ipv6_addl_options = init_fw_cb->ipv6_addtl_opts;
@@ -467,6 +476,11 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
 
 	init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
 
+	/* Set bit for "serialize task mgmt"; all other bits need to be zero */
+	init_fw_cb->add_fw_options = 0;
+	init_fw_cb->add_fw_options |=
+	    __constant_cpu_to_le16(SERIALIZE_TASK_MGMT);
+
 	if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
 		!= QLA_SUCCESS) {
 		DEBUG2(printk(KERN_WARNING
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 3d5ef2df4134..35381cb0936e 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -2304,14 +2304,13 @@ qla4_8xxx_enable_intrs(struct scsi_qla_host *ha)
 void
 qla4_8xxx_disable_intrs(struct scsi_qla_host *ha)
 {
-	if (test_bit(AF_INTERRUPTS_ON, &ha->flags))
+	if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
 		qla4_8xxx_mbx_intr_disable(ha);
 
 	spin_lock_irq(&ha->hardware_lock);
 	/* BIT 10 - set */
 	qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
 	spin_unlock_irq(&ha->hardware_lock);
-	clear_bit(AF_INTERRUPTS_ON, &ha->flags);
 }
 
 struct ql4_init_msix_entry {
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 967836ef5ab2..a4acb0dd7beb 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -29,10 +29,6 @@ static struct kmem_cache *srb_cachep;
 /*
  * Module parameter information and variables
  */
-int ql4xdiscoverywait = 60;
-module_param(ql4xdiscoverywait, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(ql4xdiscoverywait, "Discovery wait time");
-
 int ql4xdontresethba = 0;
 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql4xdontresethba,
@@ -55,6 +51,17 @@ MODULE_PARM_DESC(ql4xenablemsix,
 		" 2 = enable MSI interrupt mechanism.");
 
 #define QL4_DEF_QDEPTH 32
+static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
+module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xmaxqdepth,
+		"Maximum queue depth to report for target devices.\n"
+		" Default: 32.");
+
+static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
+module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
+MODULE_PARM_DESC(ql4xsess_recovery_tmo,
+		"Target Session Recovery Timeout.\n"
+		" Default: 30 sec.");
 
 /*
  * SCSI host template entry points
@@ -165,7 +172,7 @@ static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session)
 		DEBUG2(printk("scsi%ld: %s: ddb [%d] session recovery timeout "
 			      "of (%d) secs exhausted, marking device DEAD.\n",
 			      ha->host_no, __func__, ddb_entry->fw_ddb_index,
-			      QL4_SESS_RECOVERY_TMO));
+			      ddb_entry->sess->recovery_tmo));
 	}
 }
 
@@ -295,7 +302,7 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
 {
 	int err;
 
-	ddb_entry->sess->recovery_tmo = QL4_SESS_RECOVERY_TMO;
+	ddb_entry->sess->recovery_tmo = ql4xsess_recovery_tmo;
 
 	err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index);
 	if (err) {
@@ -753,12 +760,6 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
 	if (!pci_channel_offline(ha->pdev))
 		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
 
-	if (test_bit(AF_HBA_GOING_AWAY, &ha->flags)) {
-		DEBUG2(ql4_printk(KERN_INFO, ha, "%s exited. HBA GOING AWAY\n",
-		    __func__));
-		return;
-	}
-
 	if (is_qla8022(ha)) {
 		qla4_8xxx_watchdog(ha);
 	}
@@ -1067,7 +1068,6 @@ void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
 
 	/* Disable the board */
 	ql4_printk(KERN_INFO, ha, "Disabling the board\n");
-	set_bit(AF_HBA_GOING_AWAY, &ha->flags);
 
 	qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
 	qla4xxx_mark_all_devices_missing(ha);
@@ -1218,6 +1218,27 @@ recover_ha_init_adapter:
 	return status;
 }
 
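+/**
+ * qla4xxx_relogin_all_devices - relogin devices marked MISSING or DEAD
+ * @ha: pointer to adapter structure
+ *
+ * DDBs whose firmware state is already SESSION_ACTIVE are simply marked
+ * ONLINE again; all others are issued a relogin request.
+ **/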
+static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
+{
+	struct ddb_entry *ddb_entry, *dtemp;
+
+	list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) {
+		if ((atomic_read(&ddb_entry->state) == DDB_STATE_MISSING) ||
+		    (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD)) {
+			if (ddb_entry->fw_ddb_device_state ==
+			    DDB_DS_SESSION_ACTIVE) {
+				atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
+				ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+				    " marked ONLINE\n",	ha->host_no, __func__,
+				    ddb_entry->fw_ddb_index);
+
+				iscsi_unblock_session(ddb_entry->sess);
+			} else
+				qla4xxx_relogin_device(ha, ddb_entry);
+		}
+	}
+}
+
 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
 {
 	if (ha->dpc_thread &&
@@ -1259,11 +1280,6 @@ static void qla4xxx_do_dpc(struct work_struct *work)
 		goto do_dpc_exit;
 	}
 
-	/* HBA is in the process of being permanently disabled.
-	 * Don't process anything */
-	if (test_bit(AF_HBA_GOING_AWAY, &ha->flags))
-		return;
-
 	if (is_qla8022(ha)) {
 		if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
 			qla4_8xxx_idc_lock(ha);
@@ -1331,13 +1347,7 @@ dpc_post_reset_ha:
 	if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
 		if (!test_bit(AF_LINK_UP, &ha->flags)) {
 			/* ---- link down? --- */
-			list_for_each_entry_safe(ddb_entry, dtemp,
-						 &ha->ddb_list, list) {
-				if (atomic_read(&ddb_entry->state) ==
-						DDB_STATE_ONLINE)
-					qla4xxx_mark_device_missing(ha,
-							ddb_entry);
-			}
+			qla4xxx_mark_all_devices_missing(ha);
 		} else {
 			/* ---- link up? --- *
 			 * F/W will auto login to all devices ONLY ONCE after
@@ -1346,30 +1356,7 @@ dpc_post_reset_ha:
 			 * manually relogin to devices when recovering from
 			 * connection failures, logouts, expired KATO, etc. */
 
-			list_for_each_entry_safe(ddb_entry, dtemp,
-							&ha->ddb_list, list) {
-				if ((atomic_read(&ddb_entry->state) ==
-						 DDB_STATE_MISSING) ||
-				    (atomic_read(&ddb_entry->state) ==
-						 DDB_STATE_DEAD)) {
-					if (ddb_entry->fw_ddb_device_state ==
-					    DDB_DS_SESSION_ACTIVE) {
-						atomic_set(&ddb_entry->state,
-							   DDB_STATE_ONLINE);
-						ql4_printk(KERN_INFO, ha,
-						    "scsi%ld: %s: ddb[%d]"
-						    " marked ONLINE\n",
-						    ha->host_no, __func__,
-						    ddb_entry->fw_ddb_index);
-
-						iscsi_unblock_session(
-						    ddb_entry->sess);
-					} else
-						qla4xxx_relogin_device(
-						    ha, ddb_entry);
-				}
-
-			}
+			qla4xxx_relogin_all_devices(ha);
 		}
 	}
 
@@ -1630,6 +1617,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
 	uint8_t init_retry_count = 0;
 	char buf[34];
 	struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
+	uint32_t dev_state;
 
 	if (pci_enable_device(pdev))
 		return -1;
@@ -1713,6 +1701,18 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
 	status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST);
 	while ((!test_bit(AF_ONLINE, &ha->flags)) &&
 	    init_retry_count++ < MAX_INIT_RETRIES) {
+
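+		/* Give up retrying if the 82xx hardware reports a failed state */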
+		if (is_qla8022(ha)) {
+			qla4_8xxx_idc_lock(ha);
+			dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+			qla4_8xxx_idc_unlock(ha);
+			if (dev_state == QLA82XX_DEV_FAILED) {
+				ql4_printk(KERN_WARNING, ha, "%s: don't retry "
+				    "initialize adapter. H/W is in failed state\n",
+				    __func__);
+				break;
+			}
+		}
 		DEBUG2(printk("scsi: %s: retrying adapter initialization "
 			      "(%d)\n", __func__, init_retry_count));
 
@@ -1814,6 +1814,44 @@ probe_disable_device:
 	return ret;
 }
 
+/**
+ * qla4xxx_prevent_other_port_reinit - prevent the other port from re-initializing
+ * @ha: pointer to adapter structure
+ *
+ * Mark the other ISP-4xxx port to indicate that the driver is being removed,
+ * so that the other port will not re-initialize while the ha is being
+ * removed due to driver unload or hba hotplug.
+ **/
+static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
+{
+	struct scsi_qla_host *other_ha = NULL;
+	struct pci_dev *other_pdev = NULL;
+	int fn = ISP4XXX_PCI_FN_2;
+
+	/* iSCSI function numbers for ISP4xxx are 1 and 3 */
+	if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
+		fn = ISP4XXX_PCI_FN_1;
+
+	other_pdev =
+		pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
+		ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
+		fn));
+
+	/* Get other_ha if other_pdev is valid and its state is enabled */
+	if (other_pdev) {
+		if (atomic_read(&other_pdev->enable_cnt)) {
+			other_ha = pci_get_drvdata(other_pdev);
+			if (other_ha) {
+				set_bit(AF_HA_REMOVAL, &other_ha->flags);
+				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
+				    "Prevent %s reinit\n", __func__,
+				    dev_name(&other_ha->pdev->dev)));
+			}
+		}
+		pci_dev_put(other_pdev);
+	}
+}
+
 /**
 * qla4xxx_remove_adapter - callback function to remove adapter.
  * @pci_dev: PCI device pointer
@@ -1824,7 +1862,8 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
 
 	ha = pci_get_drvdata(pdev);
 
-	set_bit(AF_HBA_GOING_AWAY, &ha->flags);
+	if (!is_qla8022(ha))
+		qla4xxx_prevent_other_port_reinit(ha);
 
 	/* remove devs from iscsi_sessions to scsi_devices */
 	qla4xxx_free_ddb_list(ha);
@@ -1868,10 +1907,15 @@ static int qla4xxx_slave_alloc(struct scsi_device *sdev)
 {
 	struct iscsi_cls_session *sess = starget_to_session(sdev->sdev_target);
 	struct ddb_entry *ddb = sess->dd_data;
+	int queue_depth = QL4_DEF_QDEPTH;
 
 	sdev->hostdata = ddb;
 	sdev->tagged_supported = 1;
-	scsi_activate_tcq(sdev, QL4_DEF_QDEPTH);
+
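+	/* Honor the ql4xmaxqdepth module parameter only within a sane range */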
+	if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
+		queue_depth = ql4xmaxqdepth;
+
+	scsi_activate_tcq(sdev, queue_depth);
 	return 0;
 }
 
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 8475b308e01b..603155769407 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION	"5.02.00-k5"
+#define QLA4XXX_DRIVER_VERSION	"5.02.00-k6"
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index b4218390941e..3fd16d7212de 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1917,7 +1917,7 @@ store_priv_session_##field(struct device *dev,				\
 #define iscsi_priv_session_rw_attr(field, format)			\
 	iscsi_priv_session_attr_show(field, format)			\
 	iscsi_priv_session_attr_store(field)				\
-static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUGO,		\
+static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR,		\
 			show_priv_session_##field,			\
 			store_priv_session_##field)
 iscsi_priv_session_rw_attr(recovery_tmo, "%d");
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 7ff61d76b4c5..b61ebec6bca7 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2027,14 +2027,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
 	int old_rcd = sdkp->RCD;
 	int old_dpofua = sdkp->DPOFUA;
 
-	if (sdp->skip_ms_page_8) {
-		if (sdp->type == TYPE_RBC)
-			goto defaults;
-		else {
-			modepage = 0x3F;
-			dbd = 0;
-		}
-	} else if (sdp->type == TYPE_RBC) {
+	if (sdp->skip_ms_page_8)
+		goto defaults;
+
+	if (sdp->type == TYPE_RBC) {
 		modepage = 6;
 		dbd = 8;
 	} else {
@@ -2062,11 +2058,13 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
 	 */
 	if (len < 3)
 		goto bad_sense;
-	else if (len > SD_BUF_SIZE) {
-		sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
-			  "data from %d to %d bytes\n", len, SD_BUF_SIZE);
-		len = SD_BUF_SIZE;
-	}
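+	/* The caching mode page is at most 20 bytes long */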
+	if (len > 20)
+		len = 20;
+
+	/* Take headers and block descriptors into account */
+	len += data.header_length + data.block_descriptor_length;
+	if (len > SD_BUF_SIZE)
+		goto bad_sense;
 
 	/* Get the data */
 	res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr);
@@ -2074,45 +2072,16 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
 	if (scsi_status_is_good(res)) {
 		int offset = data.header_length + data.block_descriptor_length;
 
-		while (offset < len) {
-			u8 page_code = buffer[offset] & 0x3F;
-			u8 spf       = buffer[offset] & 0x40;
-
-			if (page_code == 8 || page_code == 6) {
-				/* We're interested only in the first 3 bytes.
-				 */
-				if (len - offset <= 2) {
-					sd_printk(KERN_ERR, sdkp, "Incomplete "
-						  "mode parameter data\n");
-					goto defaults;
-				} else {
-					modepage = page_code;
-					goto Page_found;
-				}
-			} else {
-				/* Go to the next page */
-				if (spf && len - offset > 3)
-					offset += 4 + (buffer[offset+2] << 8) +
-						buffer[offset+3];
-				else if (!spf && len - offset > 1)
-					offset += 2 + buffer[offset+1];
-				else {
-					sd_printk(KERN_ERR, sdkp, "Incomplete "
-						  "mode parameter data\n");
-					goto defaults;
-				}
-			}
+		if (offset >= SD_BUF_SIZE - 2) {
+			sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n");
+			goto defaults;
 		}
 
-		if (modepage == 0x3F) {
-			sd_printk(KERN_ERR, sdkp, "No Caching mode page "
-				  "present\n");
-			goto defaults;
-		} else if ((buffer[offset] & 0x3f) != modepage) {
+		if ((buffer[offset] & 0x3f) != modepage) {
 			sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
 			goto defaults;
 		}
-	Page_found:
+
 		if (modepage == 8) {
 			sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
 			sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 7f5a6a86f820..eb7a3e85304f 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -35,9 +35,11 @@
 
 struct ses_device {
 	unsigned char *page1;
+	unsigned char *page1_types;
 	unsigned char *page2;
 	unsigned char *page10;
 	short page1_len;
+	short page1_num_types;
 	short page2_len;
 	short page10_len;
 };
@@ -110,12 +112,12 @@ static int ses_set_page2_descriptor(struct enclosure_device *edev,
 	int i, j, count = 0, descriptor = ecomp->number;
 	struct scsi_device *sdev = to_scsi_device(edev->edev.parent);
 	struct ses_device *ses_dev = edev->scratch;
-	unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
+	unsigned char *type_ptr = ses_dev->page1_types;
 	unsigned char *desc_ptr = ses_dev->page2 + 8;
 
 	/* Clear everything */
 	memset(desc_ptr, 0, ses_dev->page2_len - 8);
-	for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) {
+	for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) {
 		for (j = 0; j < type_ptr[1]; j++) {
 			desc_ptr += 4;
 			if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE &&
@@ -140,12 +142,12 @@ static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev,
 	int i, j, count = 0, descriptor = ecomp->number;
 	struct scsi_device *sdev = to_scsi_device(edev->edev.parent);
 	struct ses_device *ses_dev = edev->scratch;
-	unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
+	unsigned char *type_ptr = ses_dev->page1_types;
 	unsigned char *desc_ptr = ses_dev->page2 + 8;
 
 	ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
 
-	for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) {
+	for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) {
 		for (j = 0; j < type_ptr[1]; j++) {
 			desc_ptr += 4;
 			if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE &&
@@ -358,7 +360,7 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
 	unsigned char *buf = NULL, *type_ptr, *desc_ptr, *addl_desc_ptr = NULL;
 	int i, j, page7_len, len, components;
 	struct ses_device *ses_dev = edev->scratch;
-	int types = ses_dev->page1[10];
+	int types = ses_dev->page1_num_types;
 	unsigned char *hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL);
 
 	if (!hdr_buf)
@@ -390,10 +392,10 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
 		len = (desc_ptr[2] << 8) + desc_ptr[3];
 		/* skip past overall descriptor */
 		desc_ptr += len + 4;
-		if (ses_dev->page10)
-			addl_desc_ptr = ses_dev->page10 + 8;
 	}
-	type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
+	if (ses_dev->page10)
+		addl_desc_ptr = ses_dev->page10 + 8;
+	type_ptr = ses_dev->page1_types;
 	components = 0;
 	for (i = 0; i < types; i++, type_ptr += 4) {
 		for (j = 0; j < type_ptr[1]; j++) {
@@ -503,6 +505,7 @@ static int ses_intf_add(struct device *cdev,
 	u32 result;
 	int i, types, len, components = 0;
 	int err = -ENOMEM;
+	int num_enclosures;
 	struct enclosure_device *edev;
 	struct ses_component *scomp = NULL;
 
@@ -530,16 +533,6 @@ static int ses_intf_add(struct device *cdev,
 	if (result)
 		goto recv_failed;
 
-	if (hdr_buf[1] != 0) {
-		/* FIXME: need subenclosure support; I've just never
-		 * seen a device with subenclosures and it makes the
-		 * traversal routines more complex */
-		sdev_printk(KERN_ERR, sdev,
-			"FIXME driver has no support for subenclosures (%d)\n",
-			hdr_buf[1]);
-		goto err_free;
-	}
-
 	len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
 	buf = kzalloc(len, GFP_KERNEL);
 	if (!buf)
@@ -549,11 +542,24 @@ static int ses_intf_add(struct device *cdev,
 	if (result)
 		goto recv_failed;
 
-	types = buf[10];
+	types = 0;
 
-	type_ptr = buf + 12 + buf[11];
+	/* we always have one main enclosure and the rest are referred
+	 * to as secondary subenclosures */
+	num_enclosures = buf[1] + 1;
 
-	for (i = 0; i < types; i++, type_ptr += 4) {
+	/* begin at the enclosure descriptor */
+	type_ptr = buf + 8;
+	/* skip all the enclosure descriptors */
+	for (i = 0; i < num_enclosures && type_ptr < buf + len; i++) {
+		types += type_ptr[2];
+		type_ptr += type_ptr[3] + 4;
+	}
+
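+	/* type_ptr now points at the first type descriptor header */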
+	ses_dev->page1_types = type_ptr;
+	ses_dev->page1_num_types = types;
+
+	for (i = 0; i < types && type_ptr < buf + len; i++, type_ptr += 4) {
 		if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
 		    type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE)
 			components += type_ptr[1];
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 2fac3be209ac..9ef2dbbfa62b 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -29,4 +29,6 @@ config TCM_PSCSI
 	Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
 	passthrough access to Linux/SCSI device
 
+source "drivers/target/loopback/Kconfig"
+
 endif
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 973bb190ef57..1178bbfc68fe 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -1,4 +1,3 @@
-EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/drivers/scsi/
 
 target_core_mod-y		:= target_core_configfs.o \
 				   target_core_device.o \
@@ -13,7 +12,8 @@ target_core_mod-y		:= target_core_configfs.o \
 				   target_core_transport.o \
 				   target_core_cdb.o \
 				   target_core_ua.o \
-				   target_core_rd.o
+				   target_core_rd.o \
+				   target_core_stat.o
 
 obj-$(CONFIG_TARGET_CORE)	+= target_core_mod.o
 
@@ -21,3 +21,6 @@ obj-$(CONFIG_TARGET_CORE)	+= target_core_mod.o
 obj-$(CONFIG_TCM_IBLOCK)	+= target_core_iblock.o
 obj-$(CONFIG_TCM_FILEIO)	+= target_core_file.o
 obj-$(CONFIG_TCM_PSCSI)		+= target_core_pscsi.o
+
+# Fabric modules
+obj-$(CONFIG_LOOPBACK_TARGET)	+= loopback/
diff --git a/drivers/target/loopback/Kconfig b/drivers/target/loopback/Kconfig
new file mode 100644
index 000000000000..57dcbc2d711b
--- /dev/null
+++ b/drivers/target/loopback/Kconfig
@@ -0,0 +1,11 @@
+config LOOPBACK_TARGET
+	tristate "TCM Virtual SAS target and Linux/SCSI LDD fabric loopback module"
+	help
+	  Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD
+	  fabric loopback module.
+
+config LOOPBACK_TARGET_CDB_DEBUG
+	bool "TCM loopback fabric module CDB debug code"
+	depends on LOOPBACK_TARGET
+	help
+	  Say Y here to enable the TCM loopback fabric module CDB debug code.
diff --git a/drivers/target/loopback/Makefile b/drivers/target/loopback/Makefile
new file mode 100644
index 000000000000..6abebdf95659
--- /dev/null
+++ b/drivers/target/loopback/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_LOOPBACK_TARGET)	+= tcm_loop.o
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
new file mode 100644
index 000000000000..aed4e464d31c
--- /dev/null
+++ b/drivers/target/loopback/tcm_loop.c
@@ -0,0 +1,1579 @@
+/*******************************************************************************
+ *
+ * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
+ * for emulated SAS initiator ports
+ *
+ * © Copyright 2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/configfs.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/libsas.h> /* For TASK_ATTR_* */
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_fabric_lib.h>
+#include <target/target_core_configfs.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_tmr.h>
+
+#include "tcm_loop.h"
+
+#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)
+
+/* Local pointer to allocated TCM configfs fabric module */
+static struct target_fabric_configfs *tcm_loop_fabric_configfs;
+
+static struct kmem_cache *tcm_loop_cmd_cache;
+
+static int tcm_loop_hba_no_cnt;
+
+/*
+ * Allocate a tcm_loop cmd descriptor from target_core_mod code
+ *
+ * Can be called from interrupt context in tcm_loop_queuecommand() below
+ */
+static struct se_cmd *tcm_loop_allocate_core_cmd(
+	struct tcm_loop_hba *tl_hba,
+	struct se_portal_group *se_tpg,
+	struct scsi_cmnd *sc)
+{
+	struct se_cmd *se_cmd;
+	struct se_session *se_sess;
+	struct tcm_loop_nexus *tl_nexus = tl_hba->tl_nexus;
+	struct tcm_loop_cmd *tl_cmd;
+	int sam_task_attr;
+
+	if (!tl_nexus) {
+		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
+				" does not exist\n");
+		set_host_byte(sc, DID_ERROR);
+		return NULL;
+	}
+	se_sess = tl_nexus->se_sess;
+
+	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
+	if (!tl_cmd) {
+		printk(KERN_ERR "Unable to allocate struct tcm_loop_cmd\n");
+		set_host_byte(sc, DID_ERROR);
+		return NULL;
+	}
+	se_cmd = &tl_cmd->tl_se_cmd;
+	/*
+	 * Save the pointer to struct scsi_cmnd *sc
+	 */
+	tl_cmd->sc = sc;
+	/*
+	 * Locate the SAM Task Attr from struct scsi_cmnd *
+	 */
+	if (sc->device->tagged_supported) {
+		switch (sc->tag) {
+		case HEAD_OF_QUEUE_TAG:
+			sam_task_attr = TASK_ATTR_HOQ;
+			break;
+		case ORDERED_QUEUE_TAG:
+			sam_task_attr = TASK_ATTR_ORDERED;
+			break;
+		default:
+			sam_task_attr = TASK_ATTR_SIMPLE;
+			break;
+		}
+	} else
+		sam_task_attr = TASK_ATTR_SIMPLE;
+
+	/*
+	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
+	 */
+	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
+			scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
+			&tl_cmd->tl_sense_buf[0]);
+
+	/*
+	 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
+	 */
+	if (scsi_bidi_cmnd(sc))
+		T_TASK(se_cmd)->t_tasks_bidi = 1;
+	/*
+	 * Locate the struct se_lun pointer and attach it to struct se_cmd
+	 */
+	if (transport_get_lun_for_cmd(se_cmd, NULL, tl_cmd->sc->device->lun) < 0) {
+		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+		set_host_byte(sc, DID_NO_CONNECT);
+		return NULL;
+	}
+
+	transport_device_setup_cmd(se_cmd);
+	return se_cmd;
+}
+
+/*
+ * Called by struct target_core_fabric_ops->new_cmd_map()
+ *
+ * Always called in process context.  A non-zero return value here
+ * signals that an exception should be handled based on the return code.
+ */
+static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
+{
+	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+				struct tcm_loop_cmd, tl_se_cmd);
+	struct scsi_cmnd *sc = tl_cmd->sc;
+	void *mem_ptr, *mem_bidi_ptr = NULL;
+	u32 sg_no_bidi = 0;
+	int ret;
+	/*
+	 * Allocate the necessary tasks to complete the received CDB+data
+	 */
+	ret = transport_generic_allocate_tasks(se_cmd, tl_cmd->sc->cmnd);
+	if (ret == -1) {
+		/* Out of Resources */
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	} else if (ret == -2) {
+		/*
+		 * Handle case for SAM_STAT_RESERVATION_CONFLICT
+		 */
+		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
+			return PYX_TRANSPORT_RESERVATION_CONFLICT;
+		/*
+		 * Otherwise, return SAM_STAT_CHECK_CONDITION and return
+		 * sense data.
+		 */
+		return PYX_TRANSPORT_USE_SENSE_REASON;
+	}
+	/*
+	 * Setup the struct scatterlist memory from the received
+	 * struct scsi_cmnd.
+	 */
+	if (scsi_sg_count(sc)) {
+		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM;
+		mem_ptr = (void *)scsi_sglist(sc);
+		/*
+		 * For BIDI commands, pass in the extra READ buffer
+		 * to transport_generic_map_mem_to_cmd() below..
+		 */
+		if (T_TASK(se_cmd)->t_tasks_bidi) {
+			struct scsi_data_buffer *sdb = scsi_in(sc);
+
+			mem_bidi_ptr = (void *)sdb->table.sgl;
+			sg_no_bidi = sdb->table.nents;
+		}
+	} else {
+		/*
+		 * Used for DMA_NONE
+		 */
+		mem_ptr = NULL;
+	}
+	/*
+	 * Map the SG memory into the struct se_mem->page linked list,
+	 * reusing the same physical memory referenced by sg->page_link.
+	 */
+	ret = transport_generic_map_mem_to_cmd(se_cmd, mem_ptr,
+			scsi_sg_count(sc), mem_bidi_ptr, sg_no_bidi);
+	if (ret < 0)
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+	return 0;
+}
+
+/*
+ * Called from struct target_core_fabric_ops->check_stop_free()
+ */
+static void tcm_loop_check_stop_free(struct se_cmd *se_cmd)
+{
+	/*
+	 * Do not release struct se_cmd's containing a valid TMR
+	 * pointer.  These will be released directly in tcm_loop_device_reset()
+	 * with transport_generic_free_cmd().
+	 */
+	if (se_cmd->se_tmr_req)
+		return;
+	/*
+	 * Release the struct se_cmd, which will make a callback to release
+	 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
+	 */
+	transport_generic_free_cmd(se_cmd, 0, 1, 0);
+}
+
+/*
+ * Called from struct target_core_fabric_ops->release_cmd_to_pool()
+ */
+static void tcm_loop_deallocate_core_cmd(struct se_cmd *se_cmd)
+{
+	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+				struct tcm_loop_cmd, tl_se_cmd);
+
+	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+}
+
+static int tcm_loop_proc_info(struct Scsi_Host *host, char *buffer,
+				char **start, off_t offset,
+				int length, int inout)
+{
+	return sprintf(buffer, "tcm_loop_proc_info()\n");
+}
+
+static int tcm_loop_driver_probe(struct device *);
+static int tcm_loop_driver_remove(struct device *);
+
+static int pseudo_lld_bus_match(struct device *dev,
+				struct device_driver *dev_driver)
+{
+	return 1;
+}
+
+static struct bus_type tcm_loop_lld_bus = {
+	.name			= "tcm_loop_bus",
+	.match			= pseudo_lld_bus_match,
+	.probe			= tcm_loop_driver_probe,
+	.remove			= tcm_loop_driver_remove,
+};
+
+static struct device_driver tcm_loop_driverfs = {
+	.name			= "tcm_loop",
+	.bus			= &tcm_loop_lld_bus,
+};
+/*
+ * Used with root_device_register() in tcm_loop_alloc_core_bus() below
+ */
+struct device *tcm_loop_primary;
+
+/*
+ * Copied from drivers/scsi/libfc/fc_fcp.c:fc_change_queue_depth() and
+ * drivers/scsi/libiscsi.c:iscsi_change_queue_depth()
+ */
+static int tcm_loop_change_queue_depth(
+	struct scsi_device *sdev,
+	int depth,
+	int reason)
+{
+	switch (reason) {
+	case SCSI_QDEPTH_DEFAULT:
+		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+		break;
+	case SCSI_QDEPTH_QFULL:
+		scsi_track_queue_full(sdev, depth);
+		break;
+	case SCSI_QDEPTH_RAMP_UP:
+		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return sdev->queue_depth;
+}
+
+/*
+ * Main entry point from struct scsi_host_template for incoming SCSI CDB+Data
+ * from Linux/SCSI subsystem for SCSI low level device drivers (LLDs)
+ */
+static int tcm_loop_queuecommand(
+	struct Scsi_Host *sh,
+	struct scsi_cmnd *sc)
+{
+	struct se_cmd *se_cmd;
+	struct se_portal_group *se_tpg;
+	struct tcm_loop_hba *tl_hba;
+	struct tcm_loop_tpg *tl_tpg;
+
+	TL_CDB_DEBUG("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
+		" scsi_buf_len: %u\n", sc->device->host->host_no,
+		sc->device->id, sc->device->channel, sc->device->lun,
+		sc->cmnd[0], scsi_bufflen(sc));
+	/*
+	 * Locate the struct tcm_loop_hba pointer
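+	 * stored in Scsi_Host->hostdata by tcm_loop_driver_probe()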
+	 */
+	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+	se_tpg = &tl_tpg->tl_se_tpg;
+	/*
+	 * Determine the SAM Task Attribute and allocate tl_cmd and
+	 * tl_cmd->tl_se_cmd from TCM infrastructure
+	 */
+	se_cmd = tcm_loop_allocate_core_cmd(tl_hba, se_tpg, sc);
+	if (!se_cmd) {
+		sc->scsi_done(sc);
+		return 0;
+	}
+	/*
+	 * Queue up the newly allocated se_cmd to be processed in TCM
+	 * thread context.
+	 */
+	transport_generic_handle_cdb_map(se_cmd);
+	return 0;
+}
+
+/*
+ * Called from SCSI EH process context to issue a LUN_RESET TMR
+ * to struct scsi_device
+ */
+static int tcm_loop_device_reset(struct scsi_cmnd *sc)
+{
+	struct se_cmd *se_cmd = NULL;
+	struct se_portal_group *se_tpg;
+	struct se_session *se_sess;
+	struct tcm_loop_cmd *tl_cmd = NULL;
+	struct tcm_loop_hba *tl_hba;
+	struct tcm_loop_nexus *tl_nexus;
+	struct tcm_loop_tmr *tl_tmr = NULL;
+	struct tcm_loop_tpg *tl_tpg;
+	int ret = FAILED;
+	/*
+	 * Locate the struct tcm_loop_hba pointer
+	 */
+	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+	/*
+	 * Locate the tl_nexus and se_sess pointers
+	 */
+	tl_nexus = tl_hba->tl_nexus;
+	if (!tl_nexus) {
+		printk(KERN_ERR "Unable to perform device reset without"
+				" active I_T Nexus\n");
+		return FAILED;
+	}
+	se_sess = tl_nexus->se_sess;
+	/*
+	 * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
+	 */
+	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+	se_tpg = &tl_tpg->tl_se_tpg;
+
+	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
+	if (!tl_cmd) {
+		printk(KERN_ERR "Unable to allocate memory for tl_cmd\n");
+		return FAILED;
+	}
+
+	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
+	if (!tl_tmr) {
+		printk(KERN_ERR "Unable to allocate memory for tl_tmr\n");
+		goto release;
+	}
+	init_waitqueue_head(&tl_tmr->tl_tmr_wait);
+
+	se_cmd = &tl_cmd->tl_se_cmd;
+	/*
+	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
+	 */
+	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
+				DMA_NONE, TASK_ATTR_SIMPLE,
+				&tl_cmd->tl_sense_buf[0]);
+	/*
+	 * Allocate the LUN_RESET TMR
+	 */
+	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr,
+				TMR_LUN_RESET);
+	if (!se_cmd->se_tmr_req)
+		goto release;
+	/*
+	 * Locate the underlying TCM struct se_lun from sc->device->lun
+	 */
+	if (transport_get_lun_for_tmr(se_cmd, sc->device->lun) < 0)
+		goto release;
+	/*
+	 * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
+	 * to wake us up.
+	 */
+	transport_generic_handle_tmr(se_cmd);
+	wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
+	/*
+	 * The TMR LUN_RESET has completed, check the response status and
+	 * then release allocations.
+	 */
+	ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
+		SUCCESS : FAILED;
+release:
+	if (se_cmd)
+		transport_generic_free_cmd(se_cmd, 1, 1, 0);
+	else
+		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+	kfree(tl_tmr);
+	return ret;
+}
+
+static int tcm_loop_slave_alloc(struct scsi_device *sd)
+{
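+	/*
+	 * Flag the request_queue as BIDI capable so that bidirectional
+	 * SCSI commands can be looped through tcm_loop_new_cmd_map()
+	 */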
+	set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
+	return 0;
+}
+
+static int tcm_loop_slave_configure(struct scsi_device *sd)
+{
+	return 0;
+}
+
+static struct scsi_host_template tcm_loop_driver_template = {
+	.proc_info		= tcm_loop_proc_info,
+	.proc_name		= "tcm_loopback",
+	.name			= "TCM_Loopback",
+	.queuecommand		= tcm_loop_queuecommand,
+	.change_queue_depth	= tcm_loop_change_queue_depth,
+	.eh_device_reset_handler = tcm_loop_device_reset,
+	.can_queue		= TL_SCSI_CAN_QUEUE,
+	.this_id		= -1,
+	.sg_tablesize		= TL_SCSI_SG_TABLESIZE,
+	.cmd_per_lun		= TL_SCSI_CMD_PER_LUN,
+	.max_sectors		= TL_SCSI_MAX_SECTORS,
+	.use_clustering		= DISABLE_CLUSTERING,
+	.slave_alloc		= tcm_loop_slave_alloc,
+	.slave_configure	= tcm_loop_slave_configure,
+	.module			= THIS_MODULE,
+};
+
+static int tcm_loop_driver_probe(struct device *dev)
+{
+	struct tcm_loop_hba *tl_hba;
+	struct Scsi_Host *sh;
+	int error;
+
+	tl_hba = to_tcm_loop_hba(dev);
+
+	sh = scsi_host_alloc(&tcm_loop_driver_template,
+			sizeof(struct tcm_loop_hba));
+	if (!sh) {
+		printk(KERN_ERR "Unable to allocate struct scsi_host\n");
+		return -ENODEV;
+	}
+	tl_hba->sh = sh;
+
+	/*
+	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
+	 */
+	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
+	/*
+	 * Setup single ID, Channel and LUN for now..
+	 */
+	sh->max_id = 2;
+	sh->max_lun = 0;
+	sh->max_channel = 0;
+	sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;
+
+	error = scsi_add_host(sh, &tl_hba->dev);
+	if (error) {
+		printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
+		scsi_host_put(sh);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static int tcm_loop_driver_remove(struct device *dev)
+{
+	struct tcm_loop_hba *tl_hba;
+	struct Scsi_Host *sh;
+
+	tl_hba = to_tcm_loop_hba(dev);
+	sh = tl_hba->sh;
+
+	scsi_remove_host(sh);
+	scsi_host_put(sh);
+	return 0;
+}
+
+static void tcm_loop_release_adapter(struct device *dev)
+{
+	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
+
+	kfree(tl_hba);
+}
+
+/*
+ * Called from tcm_loop_make_scsi_hba() in tcm_loop_configfs.c
+ */
+static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
+{
+	int ret;
+
+	tl_hba->dev.bus = &tcm_loop_lld_bus;
+	tl_hba->dev.parent = tcm_loop_primary;
+	tl_hba->dev.release = &tcm_loop_release_adapter;
+	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
+
+	ret = device_register(&tl_hba->dev);
+	if (ret) {
+		printk(KERN_ERR "device_register() failed for"
+				" tl_hba->dev: %d\n", ret);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/*
+ * Called from tcm_loop_fabric_init() below to register the emulated
+ * tcm_loop SCSI bus.
+ */
+static int tcm_loop_alloc_core_bus(void)
+{
+	int ret;
+
+	tcm_loop_primary = root_device_register("tcm_loop_0");
+	if (IS_ERR(tcm_loop_primary)) {
+		printk(KERN_ERR "Unable to allocate tcm_loop_primary\n");
+		return PTR_ERR(tcm_loop_primary);
+	}
+
+	ret = bus_register(&tcm_loop_lld_bus);
+	if (ret) {
+		printk(KERN_ERR "bus_register() failed for tcm_loop_lld_bus\n");
+		goto dev_unreg;
+	}
+
+	ret = driver_register(&tcm_loop_driverfs);
+	if (ret) {
+		printk(KERN_ERR "driver_register() failed for"
+				"tcm_loop_driverfs\n");
+		goto bus_unreg;
+	}
+
+	printk(KERN_INFO "Initialized TCM Loop Core Bus\n");
+	return ret;
+
+bus_unreg:
+	bus_unregister(&tcm_loop_lld_bus);
+dev_unreg:
+	root_device_unregister(tcm_loop_primary);
+	return ret;
+}
+
+static void tcm_loop_release_core_bus(void)
+{
+	driver_unregister(&tcm_loop_driverfs);
+	bus_unregister(&tcm_loop_lld_bus);
+	root_device_unregister(tcm_loop_primary);
+
+	printk(KERN_INFO "Releasing TCM Loop Core BUS\n");
+}
+
+static char *tcm_loop_get_fabric_name(void)
+{
+	return "loopback";
+}
+
+static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+	struct tcm_loop_tpg *tl_tpg =
+			(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+	/*
+	 * tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba()
+	 * time based on the protocol dependent prefix of the passed configfs group.
+	 *
+	 * Based upon tl_proto_id, TCM_Loop emulates the requested fabric
+	 * ProtocolID using target_core_fabric_lib.c symbols.
+	 */
+	switch (tl_hba->tl_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		return sas_get_fabric_proto_ident(se_tpg);
+	case SCSI_PROTOCOL_FCP:
+		return fc_get_fabric_proto_ident(se_tpg);
+	case SCSI_PROTOCOL_ISCSI:
+		return iscsi_get_fabric_proto_ident(se_tpg);
+	default:
+		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+			" SAS emulation\n", tl_hba->tl_proto_id);
+		break;
+	}
+
+	return sas_get_fabric_proto_ident(se_tpg);
+}
+
+static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
+{
+	struct tcm_loop_tpg *tl_tpg =
+		(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+	/*
+	 * Return the passed NAA identifier for the SAS Target Port
+	 */
+	return &tl_tpg->tl_hba->tl_wwn_address[0];
+}
+
+static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
+{
+	struct tcm_loop_tpg *tl_tpg =
+		(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+	/*
+	 * This Tag is used when forming the SCSI Name identifier in the
+	 * INQUIRY EVPD=1 page 0x83 response to represent the SCSI Target Port.
+	 */
+	return tl_tpg->tl_tpgt;
+}
+
+static u32 tcm_loop_get_default_depth(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static u32 tcm_loop_get_pr_transport_id(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code,
+	unsigned char *buf)
+{
+	struct tcm_loop_tpg *tl_tpg =
+			(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+
+	switch (tl_hba->tl_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+					format_code, buf);
+	case SCSI_PROTOCOL_FCP:
+		return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+					format_code, buf);
+	case SCSI_PROTOCOL_ISCSI:
+		return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+					format_code, buf);
+	default:
+		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+			" SAS emulation\n", tl_hba->tl_proto_id);
+		break;
+	}
+
+	return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+			format_code, buf);
+}
+
+static u32 tcm_loop_get_pr_transport_id_len(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code)
+{
+	struct tcm_loop_tpg *tl_tpg =
+			(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+
+	switch (tl_hba->tl_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+					format_code);
+	case SCSI_PROTOCOL_FCP:
+		return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+					format_code);
+	case SCSI_PROTOCOL_ISCSI:
+		return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+					format_code);
+	default:
+		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+			" SAS emulation\n", tl_hba->tl_proto_id);
+		break;
+	}
+
+	return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+			format_code);
+}
+
+/*
+ * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
+ * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
+ */
+static char *tcm_loop_parse_pr_out_transport_id(
+	struct se_portal_group *se_tpg,
+	const char *buf,
+	u32 *out_tid_len,
+	char **port_nexus_ptr)
+{
+	struct tcm_loop_tpg *tl_tpg =
+			(struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+
+	switch (tl_hba->tl_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+					port_nexus_ptr);
+	case SCSI_PROTOCOL_FCP:
+		return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+					port_nexus_ptr);
+	case SCSI_PROTOCOL_ISCSI:
+		return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+					port_nexus_ptr);
+	default:
+		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+			" SAS emulation\n", tl_hba->tl_proto_id);
+		break;
+	}
+
+	return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+			port_nexus_ptr);
+}
+
+/*
+ * Returning 1 here allows target_core_mod to generate a struct se_node_acl
+ * based upon the incoming fabric dependent SCSI Initiator Port
+ */
+static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
+	return 0;
+}
+
+/*
+ * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
+ * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
+ */
+static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
+{
+	return 0;
+}
+
+/*
+ * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
+ * never be called for TCM_Loop by target_core_fabric_configfs.c code.
+ * It has been added here as a nop for target_fabric_tf_ops_check()
+ */
+static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
+{
+	return 0;
+}
+
+static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
+	struct se_portal_group *se_tpg)
+{
+	struct tcm_loop_nacl *tl_nacl;
+
+	tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
+	if (!tl_nacl) {
+		printk(KERN_ERR "Unable to allocate struct tcm_loop_nacl\n");
+		return NULL;
+	}
+
+	return &tl_nacl->se_node_acl;
+}
+
+static void tcm_loop_tpg_release_fabric_acl(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl)
+{
+	struct tcm_loop_nacl *tl_nacl = container_of(se_nacl,
+				struct tcm_loop_nacl, se_node_acl);
+
+	kfree(tl_nacl);
+}
+
+static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static void tcm_loop_new_cmd_failure(struct se_cmd *se_cmd)
+{
+	/*
+	 * Since TCM_loop is already passing struct scatterlist data from
+	 * struct scsi_cmnd, no more Linux/SCSI failure dependent state need
+	 * to be handled here.
+	 */
+	return;
+}
+
+static int tcm_loop_is_state_remove(struct se_cmd *se_cmd)
+{
+	/*
+	 * Assume struct scsi_cmnd is not in remove state..
+	 */
+	return 0;
+}
+
+static int tcm_loop_sess_logged_in(struct se_session *se_sess)
+{
+	/*
+	 * Assume that TL Nexus is always active
+	 */
+	return 1;
+}
+
+static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
+{
+	return 1;
+}
+
+static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
+{
+	return;
+}
+
+static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd)
+{
+	return 1;
+}
+
+static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
+{
+	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+			struct tcm_loop_cmd, tl_se_cmd);
+
+	return tl_cmd->sc_cmd_state;
+}
+
+static int tcm_loop_shutdown_session(struct se_session *se_sess)
+{
+	return 0;
+}
+
+static void tcm_loop_close_session(struct se_session *se_sess)
+{
+	return;
+}
+
+static void tcm_loop_stop_session(
+	struct se_session *se_sess,
+	int sess_sleep,
+	int conn_sleep)
+{
+	return;
+}
+
+static void tcm_loop_fall_back_to_erl0(struct se_session *se_sess)
+{
+	return;
+}
+
+static int tcm_loop_write_pending(struct se_cmd *se_cmd)
+{
+	/*
+	 * Linux/SCSI has already sent down a struct scsi_cmnd with
+	 * sc->sc_data_direction of DMA_TO_DEVICE and its struct scatterlist
+	 * memory, which has already been mapped into struct se_cmd->t_mem_list
+	 * format by transport_generic_map_mem_to_cmd().
+	 *
+	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
+	 * object execution queue.
+	 */
+	transport_generic_process_write(se_cmd);
+	return 0;
+}
+
+static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
+{
+	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+				struct tcm_loop_cmd, tl_se_cmd);
+	struct scsi_cmnd *sc = tl_cmd->sc;
+
+	TL_CDB_DEBUG("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
+		     " cdb: 0x%02x\n", sc, sc->cmnd[0]);
+
+	sc->result = SAM_STAT_GOOD;
+	set_host_byte(sc, DID_OK);
+	sc->scsi_done(sc);
+	return 0;
+}
+
+static int tcm_loop_queue_status(struct se_cmd *se_cmd)
+{
+	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+				struct tcm_loop_cmd, tl_se_cmd);
+	struct scsi_cmnd *sc = tl_cmd->sc;
+
+	TL_CDB_DEBUG("tcm_loop_queue_status() called for scsi_cmnd: %p"
+			" cdb: 0x%02x\n", sc, sc->cmnd[0]);
+
+	if (se_cmd->sense_buffer &&
+	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
+	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
+
+		memcpy((void *)sc->sense_buffer, (void *)se_cmd->sense_buffer,
+				SCSI_SENSE_BUFFERSIZE);
+		sc->result = SAM_STAT_CHECK_CONDITION;
+		set_driver_byte(sc, DRIVER_SENSE);
+	} else
+		sc->result = se_cmd->scsi_status;
+
+	set_host_byte(sc, DID_OK);
+	sc->scsi_done(sc);
+	return 0;
+}
+
+static int tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+	struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
+	/*
+	 * The SCSI EH thread will be sleeping on tl_tmr->tl_tmr_wait, go ahead
+	 * and wake up the wait_queue_head_t in tcm_loop_device_reset()
+	 */
+	atomic_set(&tl_tmr->tmr_complete, 1);
+	wake_up(&tl_tmr->tl_tmr_wait);
+	return 0;
+}
+
+static u16 tcm_loop_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
+{
+	return 0;
+}
+
+static u16 tcm_loop_get_fabric_sense_len(void)
+{
+	return 0;
+}
+
+static u64 tcm_loop_pack_lun(unsigned int lun)
+{
+	u64 result;
+
+	/* LSB of lun into byte 1 big-endian */
+	result = ((lun & 0xff) << 8);
+	/* use flat space addressing method */
+	result |= 0x40 | ((lun >> 8) & 0x3f);
+
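+	/*
+	 * e.g. lun 5 packs to 0x0540: byte 0 carries the flat space
+	 * addressing method bits (01b) plus the LUN MSBs, and byte 1
+	 * carries the LUN LSB, per the SAM-2 LUN addressing format
+	 */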
+	return cpu_to_le64(result);
+}
+
+static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
+{
+	switch (tl_hba->tl_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		return "SAS";
+	case SCSI_PROTOCOL_FCP:
+		return "FCP";
+	case SCSI_PROTOCOL_ISCSI:
+		return "iSCSI";
+	default:
+		break;
+	}
+
+	return "Unknown";
+}
+
+/* Start items for tcm_loop_port_cit */
+
+static int tcm_loop_port_link(
+	struct se_portal_group *se_tpg,
+	struct se_lun *lun)
+{
+	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+				struct tcm_loop_tpg, tl_se_tpg);
+	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+
+	atomic_inc(&tl_tpg->tl_tpg_port_count);
+	smp_mb__after_atomic_inc();
+	/*
+	 * Add Linux/SCSI struct scsi_device by HCTL
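+	 * (host = tl_hba->sh, channel = 0, target = TPG tag, lun = unpacked_lun)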
+	 */
+	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
+
+	printk(KERN_INFO "TCM_Loop_ConfigFS: Port Link Successful\n");
+	return 0;
+}
+
+static void tcm_loop_port_unlink(
+	struct se_portal_group *se_tpg,
+	struct se_lun *se_lun)
+{
+	struct scsi_device *sd;
+	struct tcm_loop_hba *tl_hba;
+	struct tcm_loop_tpg *tl_tpg;
+
+	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
+	tl_hba = tl_tpg->tl_hba;
+
+	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
+				se_lun->unpacked_lun);
+	if (!sd) {
+		printk(KERN_ERR "Unable to locate struct scsi_device for %d:%d:"
+			"%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
+		return;
+	}
+	/*
+	 * Remove Linux/SCSI struct scsi_device by HCTL
+	 */
+	scsi_remove_device(sd);
+	scsi_device_put(sd);
+
+	atomic_dec(&tl_tpg->tl_tpg_port_count);
+	smp_mb__after_atomic_dec();
+
+	printk(KERN_INFO "TCM_Loop_ConfigFS: Port Unlink Successful\n");
+}
+
+/* End items for tcm_loop_port_cit */
+
+/* Start items for tcm_loop_nexus_cit */
+
+static int tcm_loop_make_nexus(
+	struct tcm_loop_tpg *tl_tpg,
+	const char *name)
+{
+	struct se_portal_group *se_tpg;
+	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+	struct tcm_loop_nexus *tl_nexus;
+
+	if (tl_tpg->tl_hba->tl_nexus) {
+		printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n");
+		return -EEXIST;
+	}
+	se_tpg = &tl_tpg->tl_se_tpg;
+
+	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
+	if (!tl_nexus) {
+		printk(KERN_ERR "Unable to allocate struct tcm_loop_nexus\n");
+		return -ENOMEM;
+	}
+	/*
+	 * Initialize the struct se_session pointer
+	 */
+	tl_nexus->se_sess = transport_init_session();
+	if (!tl_nexus->se_sess)
+		goto out;
+	/*
+	 * Since we are running in 'demo mode' this call will generate a
+	 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
+	 * Initiator port name of the passed configfs group 'name'.
+	 */
+	tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
+				se_tpg, (unsigned char *)name);
+	if (!tl_nexus->se_sess->se_node_acl) {
+		transport_free_session(tl_nexus->se_sess);
+		goto out;
+	}
+	/*
+	 * Now, register the SAS I_T Nexus as active with the call to
+	 * __transport_register_session()
+	 */
+	__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
+			tl_nexus->se_sess, (void *)tl_nexus);
+	tl_tpg->tl_hba->tl_nexus = tl_nexus;
+	printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
+		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
+		name);
+	return 0;
+
+out:
+	kfree(tl_nexus);
+	return -ENOMEM;
+}
+
+static int tcm_loop_drop_nexus(
+	struct tcm_loop_tpg *tpg)
+{
+	struct se_session *se_sess;
+	struct tcm_loop_nexus *tl_nexus;
+	struct tcm_loop_hba *tl_hba = tpg->tl_hba;
+
+	tl_nexus = tpg->tl_hba->tl_nexus;
+	if (!tl_nexus)
+		return -ENODEV;
+
+	se_sess = tl_nexus->se_sess;
+	if (!se_sess)
+		return -ENODEV;
+
+	if (atomic_read(&tpg->tl_tpg_port_count)) {
+		printk(KERN_ERR "Unable to remove TCM_Loop I_T Nexus with"
+			" active TPG port count: %d\n",
+			atomic_read(&tpg->tl_tpg_port_count));
+		return -EPERM;
+	}
+
+	printk(KERN_INFO "TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
+		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
+		tl_nexus->se_sess->se_node_acl->initiatorname);
+	/*
+	 * Release the SCSI I_T Nexus to the emulated SAS Target Port
+	 */
+	transport_deregister_session(tl_nexus->se_sess);
+	tpg->tl_hba->tl_nexus = NULL;
+	kfree(tl_nexus);
+	return 0;
+}
+
+/* End items for tcm_loop_nexus_cit */
+
+static ssize_t tcm_loop_tpg_show_nexus(
+	struct se_portal_group *se_tpg,
+	char *page)
+{
+	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+			struct tcm_loop_tpg, tl_se_tpg);
+	struct tcm_loop_nexus *tl_nexus;
+	ssize_t ret;
+
+	tl_nexus = tl_tpg->tl_hba->tl_nexus;
+	if (!tl_nexus)
+		return -ENODEV;
+
+	ret = snprintf(page, PAGE_SIZE, "%s\n",
+		tl_nexus->se_sess->se_node_acl->initiatorname);
+
+	return ret;
+}
+
+static ssize_t tcm_loop_tpg_store_nexus(
+	struct se_portal_group *se_tpg,
+	const char *page,
+	size_t count)
+{
+	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+			struct tcm_loop_tpg, tl_se_tpg);
+	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
+	int ret;
+	/*
+	 * Shutdown the active I_T nexus if 'NULL' is passed..
+	 */
+	if (!strncmp(page, "NULL", 4)) {
+		ret = tcm_loop_drop_nexus(tl_tpg);
+		return (!ret) ? count : ret;
+	}
+	/*
+	 * Otherwise make sure the passed virtual Initiator port WWN matches
+	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
+	 * tcm_loop_make_nexus()
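+	 *
+	 * e.g. (hypothetical WWNs) "naa.6001405c3e5a28f0" for SAS,
+	 * "fc.0x21000024ff314c48" for FCP or
+	 * "iqn.2003-01.org.linux-iscsi.example" for iSCSI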
+	 */
+	if (strlen(page) > TL_WWN_ADDR_LEN) {
+		printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds"
+				" max: %d\n", page, TL_WWN_ADDR_LEN);
+		return -EINVAL;
+	}
+	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
+
+	ptr = strstr(i_port, "naa.");
+	if (ptr) {
+		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
+			printk(KERN_ERR "Passed SAS Initiator Port %s does not"
+				" match target port protoid: %s\n", i_port,
+				tcm_loop_dump_proto_id(tl_hba));
+			return -EINVAL;
+		}
+		port_ptr = &i_port[0];
+		goto check_newline;
+	}
+	ptr = strstr(i_port, "fc.");
+	if (ptr) {
+		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
+			printk(KERN_ERR "Passed FCP Initiator Port %s does not"
+				" match target port protoid: %s\n", i_port,
+				tcm_loop_dump_proto_id(tl_hba));
+			return -EINVAL;
+		}
+		port_ptr = &i_port[3]; /* Skip over "fc." */
+		goto check_newline;
+	}
+	ptr = strstr(i_port, "iqn.");
+	if (ptr) {
+		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
+			printk(KERN_ERR "Passed iSCSI Initiator Port %s does not"
+				" match target port protoid: %s\n", i_port,
+				tcm_loop_dump_proto_id(tl_hba));
+			return -EINVAL;
+		}
+		port_ptr = &i_port[0];
+		goto check_newline;
+	}
+	printk(KERN_ERR "Unable to locate prefix for emulated Initiator Port:"
+			" %s\n", i_port);
+	return -EINVAL;
+	/*
+	 * Clear any trailing newline for the NAA WWN
+	 */
+check_newline:
+	if (i_port[strlen(i_port)-1] == '\n')
+		i_port[strlen(i_port)-1] = '\0';
+
+	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
+TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
+	&tcm_loop_tpg_nexus.attr,
+	NULL,
+};
+
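+/*
+ * A sketch of driving the configfs layout registered below from userspace;
+ * the WWNs and backstore path are hypothetical examples, with $CONFIGFS
+ * assumed to be the configfs mount point /sys/kernel/config:
+ *
+ *   mkdir -p $CONFIGFS/target/loopback/naa.6001405c3e5a28f0/tpgt_0
+ *   echo naa.6001405c3e5a28f1 > \
+ *	$CONFIGFS/target/loopback/naa.6001405c3e5a28f0/tpgt_0/nexus
+ *   mkdir $CONFIGFS/target/loopback/naa.6001405c3e5a28f0/tpgt_0/lun/lun_0
+ *   ln -s $CONFIGFS/target/core/$HBA/$DEV \
+ *	$CONFIGFS/target/loopback/naa.6001405c3e5a28f0/tpgt_0/lun/lun_0/virtual_scsi_port
+ */
+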
+/* Start items for tcm_loop_naa_cit */
+
+struct se_portal_group *tcm_loop_make_naa_tpg(
+	struct se_wwn *wwn,
+	struct config_group *group,
+	const char *name)
+{
+	struct tcm_loop_hba *tl_hba = container_of(wwn,
+			struct tcm_loop_hba, tl_hba_wwn);
+	struct tcm_loop_tpg *tl_tpg;
+	char *tpgt_str, *end_ptr;
+	int ret;
+	unsigned short int tpgt;
+
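+	/*
+	 * Parse the "tpgt_$TPGT" configfs directory name, e.g. a mkdir
+	 * of "tpgt_1" below creates Target Portal Group Tag 1
+	 */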
+	tpgt_str = strstr(name, "tpgt_");
+	if (!tpgt_str) {
+		printk(KERN_ERR "Unable to locate \"tpgt_#\" directory"
+				" group\n");
+		return ERR_PTR(-EINVAL);
+	}
+	tpgt_str += 5; /* Skip ahead of "tpgt_" */
+	tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
+
+	if (tpgt >= TL_TPGS_PER_HBA) {
+		printk(KERN_ERR "Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
+				" %u\n", tpgt, TL_TPGS_PER_HBA);
+		return ERR_PTR(-EINVAL);
+	}
+	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
+	tl_tpg->tl_hba = tl_hba;
+	tl_tpg->tl_tpgt = tpgt;
+	/*
+	 * Register the tl_tpg as an emulated SAS TCM Target Endpoint
+	 */
+	ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
+			wwn, &tl_tpg->tl_se_tpg, (void *)tl_tpg,
+			TRANSPORT_TPG_TYPE_NORMAL);
+	if (ret < 0)
+		return ERR_PTR(-ENOMEM);
+
+	printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated Emulated %s"
+		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
+		config_item_name(&wwn->wwn_group.cg_item), tpgt);
+
+	return &tl_tpg->tl_se_tpg;
+}
+
+void tcm_loop_drop_naa_tpg(
+	struct se_portal_group *se_tpg)
+{
+	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
+	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+				struct tcm_loop_tpg, tl_se_tpg);
+	struct tcm_loop_hba *tl_hba;
+	unsigned short tpgt;
+
+	tl_hba = tl_tpg->tl_hba;
+	tpgt = tl_tpg->tl_tpgt;
+	/*
+	 * Release the I_T Nexus for the Virtual SAS link if present
+	 */
+	tcm_loop_drop_nexus(tl_tpg);
+	/*
+	 * Deregister the tl_tpg as an emulated SAS TCM Target Endpoint
+	 */
+	core_tpg_deregister(se_tpg);
+
+	printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated Emulated %s"
+		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
+		config_item_name(&wwn->wwn_group.cg_item), tpgt);
+}
+
+/* End items for tcm_loop_naa_cit */
+
+/* Start items for tcm_loop_cit */
+
+struct se_wwn *tcm_loop_make_scsi_hba(
+	struct target_fabric_configfs *tf,
+	struct config_group *group,
+	const char *name)
+{
+	struct tcm_loop_hba *tl_hba;
+	struct Scsi_Host *sh;
+	char *ptr;
+	int ret, off = 0;
+
+	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
+	if (!tl_hba) {
+		printk(KERN_ERR "Unable to allocate struct tcm_loop_hba\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	/*
+	 * Determine the emulated Protocol Identifier and Target Port Name
+	 * based on the incoming configfs directory name.
+	 */
+	ptr = strstr(name, "naa.");
+	if (ptr) {
+		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
+		goto check_len;
+	}
+	ptr = strstr(name, "fc.");
+	if (ptr) {
+		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
+		off = 3; /* Skip over "fc." */
+		goto check_len;
+	}
+	ptr = strstr(name, "iqn.");
+	if (ptr) {
+		tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
+		goto check_len;
+	}
+
+	printk(KERN_ERR "Unable to locate prefix for emulated Target Port:"
+			" %s\n", name);
+	return ERR_PTR(-EINVAL);
+
+check_len:
+	if (strlen(name) > TL_WWN_ADDR_LEN) {
+		printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds"
+			" max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
+			TL_WWN_ADDR_LEN);
+		kfree(tl_hba);
+		return ERR_PTR(-EINVAL);
+	}
+	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
+
+	/*
+	 * Call device_register() on &tl_hba->dev; the bus probe callback
+	 * tcm_loop_driver_probe() then allocates and registers the emulated
+	 * Linux/SCSI struct Scsi_Host at tl_hba->sh
+	 */
+	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
+	if (ret)
+		goto out;
+
+	sh = tl_hba->sh;
+	tcm_loop_hba_no_cnt++;
+	printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated emulated Target"
+		" %s Address: %s at Linux/SCSI Host ID: %d\n",
+		tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
+
+	return &tl_hba->tl_hba_wwn;
+out:
+	kfree(tl_hba);
+	return ERR_PTR(ret);
+}
+
+void tcm_loop_drop_scsi_hba(
+	struct se_wwn *wwn)
+{
+	struct tcm_loop_hba *tl_hba = container_of(wwn,
+				struct tcm_loop_hba, tl_hba_wwn);
+	int host_no = tl_hba->sh->host_no;
+	/*
+	 * Call device_unregister() on the original tl_hba->dev.
+	 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
+	 * release *tl_hba;
+	 */
+	device_unregister(&tl_hba->dev);
+
+	printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated emulated Target"
+		" SAS Address: %s at Linux/SCSI Host ID: %d\n",
+		config_item_name(&wwn->wwn_group.cg_item), host_no);
+}
+
+static ssize_t tcm_loop_wwn_show_attr_version(
+	struct target_fabric_configfs *tf,
+	char *page)
+{
+	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
+}
+
+TF_WWN_ATTR_RO(tcm_loop, version);
+
+static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
+	&tcm_loop_wwn_version.attr,
+	NULL,
+};
+
+/* End items for tcm_loop_cit */
+
+static int tcm_loop_register_configfs(void)
+{
+	struct target_fabric_configfs *fabric;
+	struct config_group *tf_cg;
+	int ret;
+	/*
+	 * Set the TCM Loop HBA counter to zero
+	 */
+	tcm_loop_hba_no_cnt = 0;
+	/*
+	 * Register the top level struct config_item_type with TCM core
+	 */
+	fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
+	if (!fabric) {
+		printk(KERN_ERR "tcm_loop_register_configfs() failed!\n");
+		return -1;
+	}
+	/*
+	 * Setup the fabric API of function pointers used by target_core_mod
+	 */
+	fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name;
+	fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident;
+	fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn;
+	fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag;
+	fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth;
+	fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id;
+	fabric->tf_ops.tpg_get_pr_transport_id_len =
+					&tcm_loop_get_pr_transport_id_len;
+	fabric->tf_ops.tpg_parse_pr_out_transport_id =
+					&tcm_loop_parse_pr_out_transport_id;
+	fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode;
+	fabric->tf_ops.tpg_check_demo_mode_cache =
+					&tcm_loop_check_demo_mode_cache;
+	fabric->tf_ops.tpg_check_demo_mode_write_protect =
+					&tcm_loop_check_demo_mode_write_protect;
+	fabric->tf_ops.tpg_check_prod_mode_write_protect =
+					&tcm_loop_check_prod_mode_write_protect;
+	/*
+	 * The TCM loopback fabric module runs in demo-mode to a local
+	 * virtual SCSI device, so fabric dependent initiator ACLs are
+	 * not required.
+	 */
+	fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl;
+	fabric->tf_ops.tpg_release_fabric_acl =
+					&tcm_loop_tpg_release_fabric_acl;
+	fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
+	/*
+	 * Since tcm_loop is mapping physical memory from Linux/SCSI
+	 * struct scatterlist arrays for each struct scsi_cmnd I/O,
+	 * we do not need TCM to allocate an iovec array for
+	 * virtual memory address mappings
+	 */
+	fabric->tf_ops.alloc_cmd_iovecs = NULL;
+	/*
+	 * Used for setting up remaining TCM resources in process context
+	 */
+	fabric->tf_ops.new_cmd_map = &tcm_loop_new_cmd_map;
+	fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
+	fabric->tf_ops.release_cmd_to_pool = &tcm_loop_deallocate_core_cmd;
+	fabric->tf_ops.release_cmd_direct = &tcm_loop_deallocate_core_cmd;
+	fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session;
+	fabric->tf_ops.close_session = &tcm_loop_close_session;
+	fabric->tf_ops.stop_session = &tcm_loop_stop_session;
+	fabric->tf_ops.fall_back_to_erl0 = &tcm_loop_fall_back_to_erl0;
+	fabric->tf_ops.sess_logged_in = &tcm_loop_sess_logged_in;
+	fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index;
+	fabric->tf_ops.sess_get_initiator_sid = NULL;
+	fabric->tf_ops.write_pending = &tcm_loop_write_pending;
+	fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status;
+	/*
+	 * Not used for TCM loopback
+	 */
+	fabric->tf_ops.set_default_node_attributes =
+					&tcm_loop_set_default_node_attributes;
+	fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag;
+	fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state;
+	fabric->tf_ops.new_cmd_failure = &tcm_loop_new_cmd_failure;
+	fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
+	fabric->tf_ops.queue_status = &tcm_loop_queue_status;
+	fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
+	fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len;
+	fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len;
+	fabric->tf_ops.is_state_remove = &tcm_loop_is_state_remove;
+	fabric->tf_ops.pack_lun = &tcm_loop_pack_lun;
+
+	tf_cg = &fabric->tf_group;
+	/*
+	 * Setup function pointers for generic logic in target_core_fabric_configfs.c
+	 */
+	fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba;
+	fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba;
+	fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg;
+	fabric->tf_ops.fabric_drop_tpg = &tcm_loop_drop_naa_tpg;
+	/*
+	 * fabric_post_link() and fabric_pre_unlink() are used for
+	 * registration and release of TCM Loop Virtual SCSI LUNs.
+	 */
+	fabric->tf_ops.fabric_post_link = &tcm_loop_port_link;
+	fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink;
+	fabric->tf_ops.fabric_make_np = NULL;
+	fabric->tf_ops.fabric_drop_np = NULL;
+	/*
+	 * Setup default attribute lists for various fabric->tf_cit_tmpl
+	 */
+	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+	/*
+	 * Once fabric->tf_ops has been setup, now register the fabric for
+	 * use within TCM
+	 */
+	ret = target_fabric_configfs_register(fabric);
+	if (ret < 0) {
+		printk(KERN_ERR "target_fabric_configfs_register() for"
+				" TCM_Loop failed!\n");
+		target_fabric_configfs_free(fabric);
+		return -1;
+	}
+	/*
+	 * Setup our local pointer to *fabric.
+	 */
+	tcm_loop_fabric_configfs = fabric;
+	printk(KERN_INFO "TCM_LOOP[0] - Set fabric ->"
+			" tcm_loop_fabric_configfs\n");
+	return 0;
+}
+
+static void tcm_loop_deregister_configfs(void)
+{
+	if (!tcm_loop_fabric_configfs)
+		return;
+
+	target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
+	tcm_loop_fabric_configfs = NULL;
+	printk(KERN_INFO "TCM_LOOP[0] - Cleared"
+				" tcm_loop_fabric_configfs\n");
+}
+
+static int __init tcm_loop_fabric_init(void)
+{
+	int ret;
+
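+	/*
+	 * Per-I/O descriptor cache; allocated GFP_ATOMIC from
+	 * tcm_loop_queuecommand() and GFP_KERNEL from the SCSI EH
+	 * reset path in tcm_loop_device_reset()
+	 */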
+	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
+				sizeof(struct tcm_loop_cmd),
+				__alignof__(struct tcm_loop_cmd),
+				0, NULL);
+	if (!tcm_loop_cmd_cache) {
+		printk(KERN_ERR "kmem_cache_create() for"
+			" tcm_loop_cmd_cache failed\n");
+		return -ENOMEM;
+	}
+
+	ret = tcm_loop_alloc_core_bus();
+	if (ret)
+		return ret;
+
+	ret = tcm_loop_register_configfs();
+	if (ret) {
+		tcm_loop_release_core_bus();
+		return ret;
+	}
+
+	return 0;
+}
+
+static void __exit tcm_loop_fabric_exit(void)
+{
+	tcm_loop_deregister_configfs();
+	tcm_loop_release_core_bus();
+	kmem_cache_destroy(tcm_loop_cmd_cache);
+}
+
+MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
+MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
+MODULE_LICENSE("GPL");
+module_init(tcm_loop_fabric_init);
+module_exit(tcm_loop_fabric_exit);
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
new file mode 100644
index 000000000000..7e9f7ab45548
--- /dev/null
+++ b/drivers/target/loopback/tcm_loop.h
@@ -0,0 +1,77 @@
+#define TCM_LOOP_VERSION		"v2.1-rc1"
+#define TL_WWN_ADDR_LEN			256
+#define TL_TPGS_PER_HBA			32
+/*
+ * Defaults for struct scsi_host_template tcm_loop_driver_template
+ *
+ * We use large can_queue and cmd_per_lun here and let TCM enforce
+ * the underlying struct se_device->queue_depth.
+ */
+#define TL_SCSI_CAN_QUEUE		1024
+#define TL_SCSI_CMD_PER_LUN		1024
+#define TL_SCSI_MAX_SECTORS		1024
+#define TL_SCSI_SG_TABLESIZE		256
+/*
+ * Used in tcm_loop_driver_probe() for struct Scsi_Host->max_cmd_len
+ */
+#define TL_SCSI_MAX_CMD_LEN		32
+
+#ifdef CONFIG_LOOPBACK_TARGET_CDB_DEBUG
+# define TL_CDB_DEBUG(x...)		printk(KERN_INFO x)
+#else
+# define TL_CDB_DEBUG(x...)
+#endif
+
+struct tcm_loop_cmd {
+	/* State of Linux/SCSI CDB+Data descriptor */
+	u32 sc_cmd_state;
+	/* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */
+	struct scsi_cmnd *sc;
+	struct list_head *tl_cmd_list;
+	/* The TCM I/O descriptor that is accessed via container_of() */
+	struct se_cmd tl_se_cmd;
+	/* Sense buffer that will be mapped into outgoing status */
+	unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER];
+};
+
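+/*
+ * Used by tcm_loop_device_reset() to wait for tcm_loop_queue_tm_rsp()
+ * to signal TMR completion from TCM Core
+ */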
+struct tcm_loop_tmr {
+	atomic_t tmr_complete;
+	wait_queue_head_t tl_tmr_wait;
+};
+
+struct tcm_loop_nexus {
+	int it_nexus_active;
+	/*
+	 * Pointer to Linux/SCSI HBA from include/scsi/scsi_host.h
+	 */
+	struct Scsi_Host *sh;
+	/*
+	 * Pointer to TCM session for I_T Nexus
+	 */
+	struct se_session *se_sess;
+};
+
+struct tcm_loop_nacl {
+	struct se_node_acl se_node_acl;
+};
+
+struct tcm_loop_tpg {
+	unsigned short tl_tpgt;
+	atomic_t tl_tpg_port_count;
+	struct se_portal_group tl_se_tpg;
+	struct tcm_loop_hba *tl_hba;
+};
+
+struct tcm_loop_hba {
+	u8 tl_proto_id;
+	unsigned char tl_wwn_address[TL_WWN_ADDR_LEN];
+	struct se_hba_s *se_hba;
+	struct se_lun *tl_hba_lun;
+	struct se_port *tl_hba_lun_sep;
+	struct se_device_s *se_dev_hba_ptr;
+	struct tcm_loop_nexus *tl_nexus;
+	struct device dev;
+	struct Scsi_Host *sh;
+	struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
+	struct se_wwn tl_hba_wwn;
+};
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index caf8dc18ee0a..a5f44a6e6e1d 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -3,8 +3,8 @@
  *
  * This file contains ConfigFS logic for the Generic Target Engine project.
  *
- * Copyright (c) 2008-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * Copyright (c) 2008-2011 Rising Tide Systems
+ * Copyright (c) 2008-2011 Linux-iSCSI.org
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -50,6 +50,7 @@
 #include "target_core_hba.h"
 #include "target_core_pr.h"
 #include "target_core_rd.h"
+#include "target_core_stat.h"
 
 static struct list_head g_tf_list;
 static struct mutex g_tf_lock;
@@ -1451,8 +1452,8 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
 	size_t count)
 {
 	struct se_device *dev;
-	unsigned char *i_fabric, *t_fabric, *i_port = NULL, *t_port = NULL;
-	unsigned char *isid = NULL;
+	unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
+	unsigned char *t_fabric = NULL, *t_port = NULL;
 	char *orig, *ptr, *arg_p, *opts;
 	substring_t args[MAX_OPT_ARGS];
 	unsigned long long tmp_ll;
@@ -1488,9 +1489,17 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
 		switch (token) {
 		case Opt_initiator_fabric:
 			i_fabric = match_strdup(&args[0]);
+			if (!i_fabric) {
+				ret = -ENOMEM;
+				goto out;
+			}
 			break;
 		case Opt_initiator_node:
 			i_port = match_strdup(&args[0]);
+			if (!i_port) {
+				ret = -ENOMEM;
+				goto out;
+			}
 			if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) {
 				printk(KERN_ERR "APTPL metadata initiator_node="
 					" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
@@ -1501,6 +1510,10 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
 			break;
 		case Opt_initiator_sid:
 			isid = match_strdup(&args[0]);
+			if (!isid) {
+				ret = -ENOMEM;
+				goto out;
+			}
 			if (strlen(isid) > PR_REG_ISID_LEN) {
 				printk(KERN_ERR "APTPL metadata initiator_isid"
 					"= exceeds PR_REG_ISID_LEN: %d\n",
@@ -1511,6 +1524,10 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
 			break;
 		case Opt_sa_res_key:
 			arg_p = match_strdup(&args[0]);
+			if (!arg_p) {
+				ret = -ENOMEM;
+				goto out;
+			}
 			ret = strict_strtoull(arg_p, 0, &tmp_ll);
 			if (ret < 0) {
 				printk(KERN_ERR "strict_strtoull() failed for"
@@ -1547,9 +1564,17 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
 		 */
 		case Opt_target_fabric:
 			t_fabric = match_strdup(&args[0]);
+			if (!t_fabric) {
+				ret = -ENOMEM;
+				goto out;
+			}
 			break;
 		case Opt_target_node:
 			t_port = match_strdup(&args[0]);
+			if (!t_port) {
+				ret = -ENOMEM;
+				goto out;
+			}
 			if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) {
 				printk(KERN_ERR "APTPL metadata target_node="
 					" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
@@ -1592,6 +1617,11 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
 			i_port, isid, mapped_lun, t_port, tpgt, target_lun,
 			res_holder, all_tg_pt, type);
 out:
+	kfree(i_fabric);
+	kfree(i_port);
+	kfree(isid);
+	kfree(t_fabric);
+	kfree(t_port);
 	kfree(orig);
 	return (ret == 0) ? count : ret;
 }
@@ -1798,7 +1828,9 @@ static ssize_t target_core_store_dev_enable(
 		return -EINVAL;
 
 	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
-	if (!(dev) || IS_ERR(dev))
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
+	else if (!dev)
 		return -EINVAL;
 
 	se_dev->se_dev_ptr = dev;
@@ -2678,6 +2710,34 @@ static struct config_item_type target_core_alua_cit = {
 
 /* End functions for struct config_item_type target_core_alua_cit */
 
+/* Start functions for struct config_item_type target_core_stat_cit */
+
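+/*
+ * The statistics groups under core/$HBA/$DEV/statistics/ are created by
+ * the kernel itself in target_core_make_subdev(), so explicit mkdir(2)
+ * and rmdir(2) attempts from userspace are rejected here
+ */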
+static struct config_group *target_core_stat_mkdir(
+	struct config_group *group,
+	const char *name)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static void target_core_stat_rmdir(
+	struct config_group *group,
+	struct config_item *item)
+{
+	return;
+}
+
+static struct configfs_group_operations target_core_stat_group_ops = {
+	.make_group		= &target_core_stat_mkdir,
+	.drop_item		= &target_core_stat_rmdir,
+};
+
+static struct config_item_type target_core_stat_cit = {
+	.ct_group_ops		= &target_core_stat_group_ops,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_stat_cit */
+
 /* Start functions for struct config_item_type target_core_hba_cit */
 
 static struct config_group *target_core_make_subdev(
@@ -2690,10 +2750,12 @@ static struct config_group *target_core_make_subdev(
 	struct config_item *hba_ci = &group->cg_item;
 	struct se_hba *hba = item_to_hba(hba_ci);
 	struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
+	struct config_group *dev_stat_grp = NULL;
+	int errno = -ENOMEM, ret;
 
-	if (mutex_lock_interruptible(&hba->hba_access_mutex))
-		return NULL;
-
+	ret = mutex_lock_interruptible(&hba->hba_access_mutex);
+	if (ret)
+		return ERR_PTR(ret);
 	/*
 	 * Locate the struct se_subsystem_api from parent's struct se_hba.
 	 */
@@ -2723,7 +2785,7 @@ static struct config_group *target_core_make_subdev(
 	se_dev->se_dev_hba = hba;
 	dev_cg = &se_dev->se_dev_group;
 
-	dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 6,
+	dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7,
 			GFP_KERNEL);
 	if (!(dev_cg->default_groups))
 		goto out;
@@ -2755,13 +2817,17 @@ static struct config_group *target_core_make_subdev(
 			&target_core_dev_wwn_cit);
 	config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group,
 			"alua", &target_core_alua_tg_pt_gps_cit);
+	config_group_init_type_name(&se_dev->dev_stat_grps.stat_group,
+			"statistics", &target_core_stat_cit);
+
 	dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group;
 	dev_cg->default_groups[1] = &se_dev->se_dev_pr_group;
 	dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group;
 	dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group;
-	dev_cg->default_groups[4] = NULL;
+	dev_cg->default_groups[4] = &se_dev->dev_stat_grps.stat_group;
+	dev_cg->default_groups[5] = NULL;
 	/*
-	 * Add core/$HBA/$DEV/alua/tg_pt_gps/default_tg_pt_gp
+	 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
 	 */
 	tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
 	if (!(tg_pt_gp))
@@ -2781,6 +2847,17 @@ static struct config_group *target_core_make_subdev(
 	tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
 	tg_pt_gp_cg->default_groups[1] = NULL;
 	T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp;
+	/*
+	 * Add core/$HBA/$DEV/statistics/ default groups
+	 */
+	dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group;
+	dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4,
+				GFP_KERNEL);
+	if (!dev_stat_grp->default_groups) {
+		printk(KERN_ERR "Unable to allocate dev_stat_grp->default_groups\n");
+		goto out;
+	}
+	target_stat_setup_dev_default_groups(se_dev);
 
 	printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
 		" %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
@@ -2792,6 +2869,8 @@ out:
 		core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
 		T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
 	}
+	if (dev_stat_grp)
+		kfree(dev_stat_grp->default_groups);
 	if (tg_pt_gp_cg)
 		kfree(tg_pt_gp_cg->default_groups);
 	if (dev_cg)
@@ -2801,7 +2880,7 @@ out:
 	kfree(se_dev);
 unlock:
 	mutex_unlock(&hba->hba_access_mutex);
-	return NULL;
+	return ERR_PTR(errno);
 }
 
 static void target_core_drop_subdev(
@@ -2813,7 +2892,7 @@ static void target_core_drop_subdev(
 	struct se_hba *hba;
 	struct se_subsystem_api *t;
 	struct config_item *df_item;
-	struct config_group *dev_cg, *tg_pt_gp_cg;
+	struct config_group *dev_cg, *tg_pt_gp_cg, *dev_stat_grp;
 	int i;
 
 	hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
@@ -2825,6 +2904,14 @@ static void target_core_drop_subdev(
 	list_del(&se_dev->g_se_dev_list);
 	spin_unlock(&se_global->g_device_lock);
 
+	dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group;
+	for (i = 0; dev_stat_grp->default_groups[i]; i++) {
+		df_item = &dev_stat_grp->default_groups[i]->cg_item;
+		dev_stat_grp->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(dev_stat_grp->default_groups);
+
 	tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
 	for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
 		df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
@@ -3044,7 +3131,7 @@ static struct config_item_type target_core_cit = {
 
 /* Stop functions for struct config_item_type target_core_hba_cit */
 
-static int target_core_init_configfs(void)
+static int __init target_core_init_configfs(void)
 {
 	struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
 	struct config_group *lu_gp_cg = NULL;
@@ -3176,7 +3263,7 @@ out_global:
 	return -1;
 }
 
-static void target_core_exit_configfs(void)
+static void __exit target_core_exit_configfs(void)
 {
 	struct configfs_subsystem *subsys;
 	struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
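
The configfs hunks above all lean on the same default_groups idiom: allocate a NULL-terminated array of child-group pointers, initialize each child with config_group_init_type_name(), then publish the children by filling the slots. (The arrays are sized with sizeof(struct config_group) where sizeof(struct config_group *) would suffice; that over-allocates but is harmless.) A minimal sketch of the setup half, with hypothetical my_obj/my_child_cit names:

	static int my_setup_groups(struct my_obj *obj)
	{
		struct config_group *cg = &obj->group;

		/* one child group plus the NULL terminator */
		cg->default_groups = kzalloc(2 * sizeof(struct config_group *),
					GFP_KERNEL);
		if (!cg->default_groups)
			return -ENOMEM;

		config_group_init_type_name(&obj->child_group, "statistics",
				&my_child_cit);
		cg->default_groups[0] = &obj->child_group;
		cg->default_groups[1] = NULL;
		return 0;
	}

configfs creates the "statistics" directory automatically when the parent group is registered; nothing ever mkdirs it from userspace.
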
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 350ed401544e..3fb8e32506ed 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -589,6 +589,7 @@ static void core_export_port(
  *	Called with struct se_device->se_port_lock spinlock held.
  */
 static void core_release_port(struct se_device *dev, struct se_port *port)
+	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
 {
 	/*
 	 * Wait for any port reference for PR ALL_TG_PT=1 operation
@@ -779,49 +780,14 @@ void se_release_vpd_for_dev(struct se_device *dev)
 	return;
 }
 
-/*
- * Called with struct se_hba->device_lock held.
- */
-void se_clear_dev_ports(struct se_device *dev)
-{
-	struct se_hba *hba = dev->se_hba;
-	struct se_lun *lun;
-	struct se_portal_group *tpg;
-	struct se_port *sep, *sep_tmp;
-
-	spin_lock(&dev->se_port_lock);
-	list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
-		spin_unlock(&dev->se_port_lock);
-		spin_unlock(&hba->device_lock);
-
-		lun = sep->sep_lun;
-		tpg = sep->sep_tpg;
-		spin_lock(&lun->lun_sep_lock);
-		if (lun->lun_se_dev == NULL) {
-			spin_unlock(&lun->lun_sep_lock);
-			continue;
-		}
-		spin_unlock(&lun->lun_sep_lock);
-
-		core_dev_del_lun(tpg, lun->unpacked_lun);
-
-		spin_lock(&hba->device_lock);
-		spin_lock(&dev->se_port_lock);
-	}
-	spin_unlock(&dev->se_port_lock);
-
-	return;
-}
-
 /*	se_free_virtual_device():
  *
  *	Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
  */
 int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
 {
-	spin_lock(&hba->device_lock);
-	se_clear_dev_ports(dev);
-	spin_unlock(&hba->device_lock);
+	if (!list_empty(&dev->dev_sep_list))
+		dump_stack();
 
 	core_alua_free_lu_gp_mem(dev);
 	se_release_device_for_hba(dev);
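
se_clear_dev_ports() can go away because configfs now enforces the ordering itself: a $DEV group cannot be removed while fabric LUN ports still reference the device, so by the time se_free_virtual_device() runs, dev_sep_list must already be empty. The dump_stack() is purely a sanity check; an equivalent, slightly more idiomatic form (a sketch, not what the patch uses) would be:

	/* configfs child removal must have drained the port list already */
	WARN(!list_empty(&dev->dev_sep_list),
	     "dev_sep_list not empty at device release\n");
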
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index b65d1c8e7740..07ab5a3bb8e8 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -4,10 +4,10 @@
  * This file contains generic fabric module configfs infrastructure for
  * TCM v4.x code
  *
- * Copyright (c) 2010 Rising Tide Systems
- * Copyright (c) 2010 Linux-iSCSI.org
+ * Copyright (c) 2010,2011 Rising Tide Systems
+ * Copyright (c) 2010,2011 Linux-iSCSI.org
  *
- * Copyright (c) 2010 Nicholas A. Bellinger <nab@linux-iscsi.org>
+ * Copyright (c) Nicholas A. Bellinger <nab@linux-iscsi.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -48,6 +48,7 @@
 #include "target_core_alua.h"
 #include "target_core_hba.h"
 #include "target_core_pr.h"
+#include "target_core_stat.h"
 
 #define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
 static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
@@ -241,6 +242,32 @@ TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL,
 
 /* End of tfc_tpg_mappedlun_cit */
 
+/* Start of tfc_tpg_mappedlun_stat_cit */
+
+static struct config_group *target_core_mappedlun_stat_mkdir(
+	struct config_group *group,
+	const char *name)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static void target_core_mappedlun_stat_rmdir(
+	struct config_group *group,
+	struct config_item *item)
+{
+	return;
+}
+
+static struct configfs_group_operations target_fabric_mappedlun_stat_group_ops = {
+	.make_group		= target_core_mappedlun_stat_mkdir,
+	.drop_item		= target_core_mappedlun_stat_rmdir,
+};
+
+TF_CIT_SETUP(tpg_mappedlun_stat, NULL, &target_fabric_mappedlun_stat_group_ops,
+		NULL);
+
+/* End of tfc_tpg_mappedlun_stat_cit */
+
 /* Start of tfc_tpg_nacl_attrib_cit */
 
 CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group);
@@ -294,6 +321,7 @@ static struct config_group *target_fabric_make_mappedlun(
 	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
 	struct se_lun_acl *lacl;
 	struct config_item *acl_ci;
+	struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
 	char *buf;
 	unsigned long mapped_lun;
 	int ret = 0;
@@ -330,15 +358,42 @@ static struct config_group *target_fabric_make_mappedlun(
 
 	lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
 			config_item_name(acl_ci), &ret);
-	if (!(lacl))
+	if (!(lacl)) {
+		ret = -EINVAL;
 		goto out;
+	}
+
+	lacl_cg = &lacl->se_lun_group;
+	lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+				GFP_KERNEL);
+	if (!lacl_cg->default_groups) {
+		printk(KERN_ERR "Unable to allocate lacl_cg->default_groups\n");
+		ret = -ENOMEM;
+		goto out;
+	}
 
 	config_group_init_type_name(&lacl->se_lun_group, name,
 			&TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit);
+	config_group_init_type_name(&lacl->ml_stat_grps.stat_group,
+			"statistics", &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_stat_cit);
+	lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
+	lacl_cg->default_groups[1] = NULL;
+
+	ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
+	ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
+				GFP_KERNEL);
+	if (!ml_stat_grp->default_groups) {
+		printk(KERN_ERR "Unable to allocate ml_stat_grp->default_groups\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	target_stat_setup_mappedlun_default_groups(lacl);
 
 	kfree(buf);
 	return &lacl->se_lun_group;
 out:
+	if (lacl_cg)
+		kfree(lacl_cg->default_groups);
 	kfree(buf);
 	return ERR_PTR(ret);
 }
@@ -347,6 +402,28 @@ static void target_fabric_drop_mappedlun(
 	struct config_group *group,
 	struct config_item *item)
 {
+	struct se_lun_acl *lacl = container_of(to_config_group(item),
+			struct se_lun_acl, se_lun_group);
+	struct config_item *df_item;
+	struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
+	int i;
+
+	ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
+	for (i = 0; ml_stat_grp->default_groups[i]; i++) {
+		df_item = &ml_stat_grp->default_groups[i]->cg_item;
+		ml_stat_grp->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(ml_stat_grp->default_groups);
+
+	lacl_cg = &lacl->se_lun_group;
+	for (i = 0; lacl_cg->default_groups[i]; i++) {
+		df_item = &lacl_cg->default_groups[i]->cg_item;
+		lacl_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(lacl_cg->default_groups);
+
 	config_item_put(item);
 }
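
The drop side is the exact inverse of the setup: each default_groups slot is cleared before config_item_put() so the configfs core, which walks the same array during rmdir(2), never sees a half-released entry, and the backing array is freed last. Distilled into a hypothetical helper:

	static void my_drop_groups(struct config_group *cg)
	{
		struct config_item *df_item;
		int i;

		for (i = 0; cg->default_groups[i]; i++) {
			df_item = &cg->default_groups[i]->cg_item;
			cg->default_groups[i] = NULL;	/* unpublish the slot */
			config_item_put(df_item);	/* drop the init reference */
		}
		kfree(cg->default_groups);
	}
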
 
@@ -376,6 +453,15 @@ TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops,
 
 /* End of tfc_tpg_nacl_base_cit */
 
+/* Start of tfc_node_fabric_stats_cit */
+/*
+ * This is used as a placeholder for struct se_node_acl->acl_fabric_stat_group
+ * to allow fabrics access to ->acl_fabric_stat_group->default_groups[]
+ */
+TF_CIT_SETUP(tpg_nacl_stat, NULL, NULL, NULL);
+
+/* End of tfc_node_fabric_stats_cit */
+
 /* Start of tfc_tpg_nacl_cit */
 
 static struct config_group *target_fabric_make_nodeacl(
@@ -402,7 +488,8 @@ static struct config_group *target_fabric_make_nodeacl(
 	nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group;
 	nacl_cg->default_groups[1] = &se_nacl->acl_auth_group;
 	nacl_cg->default_groups[2] = &se_nacl->acl_param_group;
-	nacl_cg->default_groups[3] = NULL;
+	nacl_cg->default_groups[3] = &se_nacl->acl_fabric_stat_group;
+	nacl_cg->default_groups[4] = NULL;
 
 	config_group_init_type_name(&se_nacl->acl_group, name,
 			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit);
@@ -412,6 +499,9 @@ static struct config_group *target_fabric_make_nodeacl(
 			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit);
 	config_group_init_type_name(&se_nacl->acl_param_group, "param",
 			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit);
+	config_group_init_type_name(&se_nacl->acl_fabric_stat_group,
+			"fabric_statistics",
+			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_stat_cit);
 
 	return &se_nacl->acl_group;
 }
@@ -758,6 +848,31 @@ TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL, target_fabric_port_at
 
 /* End of tfc_tpg_port_cit */
 
+/* Start of tfc_tpg_port_stat_cit */
+
+static struct config_group *target_core_port_stat_mkdir(
+	struct config_group *group,
+	const char *name)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static void target_core_port_stat_rmdir(
+	struct config_group *group,
+	struct config_item *item)
+{
+	return;
+}
+
+static struct configfs_group_operations target_fabric_port_stat_group_ops = {
+	.make_group		= target_core_port_stat_mkdir,
+	.drop_item		= target_core_port_stat_rmdir,
+};
+
+TF_CIT_SETUP(tpg_port_stat, NULL, &target_fabric_port_stat_group_ops, NULL);
+
+/* End of tfc_tpg_port_stat_cit */
+
 /* Start of tfc_tpg_lun_cit */
 
 static struct config_group *target_fabric_make_lun(
@@ -768,7 +883,9 @@ static struct config_group *target_fabric_make_lun(
 	struct se_portal_group *se_tpg = container_of(group,
 			struct se_portal_group, tpg_lun_group);
 	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	struct config_group *lun_cg = NULL, *port_stat_grp = NULL;
 	unsigned long unpacked_lun;
+	int errno;
 
 	if (strstr(name, "lun_") != name) {
 		printk(KERN_ERR "Unable to locate \"lun_\" in"
@@ -782,16 +899,64 @@ static struct config_group *target_fabric_make_lun(
 	if (!(lun))
 		return ERR_PTR(-EINVAL);
 
+	lun_cg = &lun->lun_group;
+	lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+				GFP_KERNEL);
+	if (!lun_cg->default_groups) {
+		printk(KERN_ERR "Unable to allocate lun_cg->default_groups\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
 	config_group_init_type_name(&lun->lun_group, name,
 			&TF_CIT_TMPL(tf)->tfc_tpg_port_cit);
+	config_group_init_type_name(&lun->port_stat_grps.stat_group,
+			"statistics", &TF_CIT_TMPL(tf)->tfc_tpg_port_stat_cit);
+	lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
+	lun_cg->default_groups[1] = NULL;
+
+	port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
+	port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
+				GFP_KERNEL);
+	if (!port_stat_grp->default_groups) {
+		printk(KERN_ERR "Unable to allocate port_stat_grp->default_groups\n");
+		errno = -ENOMEM;
+		goto out;
+	}
+	target_stat_setup_port_default_groups(lun);
 
 	return &lun->lun_group;
+out:
+	if (lun_cg)
+		kfree(lun_cg->default_groups);
+	return ERR_PTR(errno);
 }
 
 static void target_fabric_drop_lun(
 	struct config_group *group,
 	struct config_item *item)
 {
+	struct se_lun *lun = container_of(to_config_group(item),
+				struct se_lun, lun_group);
+	struct config_item *df_item;
+	struct config_group *lun_cg, *port_stat_grp;
+	int i;
+
+	port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
+	for (i = 0; port_stat_grp->default_groups[i]; i++) {
+		df_item = &port_stat_grp->default_groups[i]->cg_item;
+		port_stat_grp->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(port_stat_grp->default_groups);
+
+	lun_cg = &lun->lun_group;
+	for (i = 0; lun_cg->default_groups[i]; i++) {
+		df_item = &lun_cg->default_groups[i]->cg_item;
+		lun_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(lun_cg->default_groups);
+
 	config_item_put(item);
 }
 
@@ -946,6 +1111,15 @@ TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops,
 
 /* End of tfc_tpg_cit */
 
+/* Start of tfc_wwn_fabric_stats_cit */
+/*
+ * This is used as a placeholder for struct se_wwn->fabric_stat_group
+ * to allow fabrics access to ->fabric_stat_group->default_groups[]
+ */
+TF_CIT_SETUP(wwn_fabric_stats, NULL, NULL, NULL);
+
+/* End of tfc_wwn_fabric_stats_cit */
+
 /* Start of tfc_wwn_cit */
 
 static struct config_group *target_fabric_make_wwn(
@@ -966,8 +1140,17 @@ static struct config_group *target_fabric_make_wwn(
 		return ERR_PTR(-EINVAL);
 
 	wwn->wwn_tf = tf;
+	/*
+	 * Setup default groups from pre-allocated wwn->wwn_default_groups
+	 */
+	wwn->wwn_group.default_groups = wwn->wwn_default_groups;
+	wwn->wwn_group.default_groups[0] = &wwn->fabric_stat_group;
+	wwn->wwn_group.default_groups[1] = NULL;
+
 	config_group_init_type_name(&wwn->wwn_group, name,
 			&TF_CIT_TMPL(tf)->tfc_tpg_cit);
+	config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics",
+			&TF_CIT_TMPL(tf)->tfc_wwn_fabric_stats_cit);
 
 	return &wwn->wwn_group;
 }
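
Unlike the LUN and MappedLUN cases, the wwn default_groups array is not kzalloc()ed: it is embedded in struct se_wwn as wwn_default_groups[], so there is nothing to allocate here and nothing to free on an error path. A sketch of that layout (placeholder struct name; the two-entry size is inferred from the single child group plus terminator used above):

	struct my_wwn {
		struct config_group	wwn_group;
		struct config_group	fabric_stat_group;
		struct config_group	*wwn_default_groups[2];	/* 1 + NULL */
	};
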
@@ -976,6 +1159,18 @@ static void target_fabric_drop_wwn(
 	struct config_group *group,
 	struct config_item *item)
 {
+	struct se_wwn *wwn = container_of(to_config_group(item),
+				struct se_wwn, wwn_group);
+	struct config_item *df_item;
+	struct config_group *cg = &wwn->wwn_group;
+	int i;
+
+	for (i = 0; cg->default_groups[i]; i++) {
+		df_item = &cg->default_groups[i]->cg_item;
+		cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+
 	config_item_put(item);
 }
 
@@ -1015,9 +1210,11 @@ int target_fabric_setup_cits(struct target_fabric_configfs *tf)
 {
 	target_fabric_setup_discovery_cit(tf);
 	target_fabric_setup_wwn_cit(tf);
+	target_fabric_setup_wwn_fabric_stats_cit(tf);
 	target_fabric_setup_tpg_cit(tf);
 	target_fabric_setup_tpg_base_cit(tf);
 	target_fabric_setup_tpg_port_cit(tf);
+	target_fabric_setup_tpg_port_stat_cit(tf);
 	target_fabric_setup_tpg_lun_cit(tf);
 	target_fabric_setup_tpg_np_cit(tf);
 	target_fabric_setup_tpg_np_base_cit(tf);
@@ -1028,7 +1225,9 @@ int target_fabric_setup_cits(struct target_fabric_configfs *tf)
 	target_fabric_setup_tpg_nacl_attrib_cit(tf);
 	target_fabric_setup_tpg_nacl_auth_cit(tf);
 	target_fabric_setup_tpg_nacl_param_cit(tf);
+	target_fabric_setup_tpg_nacl_stat_cit(tf);
 	target_fabric_setup_tpg_mappedlun_cit(tf);
+	target_fabric_setup_tpg_mappedlun_stat_cit(tf);
 
 	return 0;
 }
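
Every new CIT is registered through the TF_CIT_SETUP() generator shown at the top of this file: each invocation emits a target_fabric_setup_<name>_cit() that fills the matching slot in the fabric's CIT template. Approximately (the body is inferred from the macro's visible signature and its callers, so treat it as a sketch):

	static void target_fabric_setup_tpg_port_stat_cit(
		struct target_fabric_configfs *tf)
	{
		struct config_item_type *cit =
			&TF_CIT_TMPL(tf)->tfc_tpg_port_stat_cit;

		cit->ct_item_ops = NULL;
		cit->ct_group_ops = &target_fabric_port_stat_group_ops;
		cit->ct_attrs = NULL;
		cit->ct_owner = tf->tf_module;
	}

Returning -ENOSYS from the stat mkdir handlers makes the statistics directories effectively read-only: userspace can neither create nor remove entries beneath them.
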
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index a3c695adabec..d57ad672677f 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -34,6 +34,7 @@
 #include <target/target_core_base.h>
 #include <target/target_core_device.h>
 #include <target/target_core_transport.h>
+#include <target/target_core_fabric_lib.h>
 #include <target/target_core_fabric_ops.h>
 #include <target/target_core_configfs.h>
 
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 190ca8ac2498..02f553aef43d 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -134,7 +134,7 @@ static struct se_device *fd_create_virtdevice(
 	mm_segment_t old_fs;
 	struct file *file;
 	struct inode *inode = NULL;
-	int dev_flags = 0, flags;
+	int dev_flags = 0, flags, ret = -EINVAL;
 
 	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
 
@@ -146,6 +146,7 @@ static struct se_device *fd_create_virtdevice(
 	if (IS_ERR(dev_p)) {
 		printk(KERN_ERR "getname(%s) failed: %lu\n",
 			fd_dev->fd_dev_name, IS_ERR(dev_p));
+		ret = PTR_ERR(dev_p);
 		goto fail;
 	}
 #if 0
@@ -165,8 +166,12 @@ static struct se_device *fd_create_virtdevice(
 		flags |= O_SYNC;
 
 	file = filp_open(dev_p, flags, 0600);
-
-	if (IS_ERR(file) || !file || !file->f_dentry) {
+	if (IS_ERR(file)) {
+		printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
+		ret = PTR_ERR(file);
+		goto fail;
+	}
+	if (!file || !file->f_dentry) {
 		printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
 		goto fail;
 	}
@@ -241,7 +246,7 @@ fail:
 		fd_dev->fd_file = NULL;
 	}
 	putname(dev_p);
-	return NULL;
+	return ERR_PTR(ret);
 }
 
 /*	fd_free_device(): (Part of se_subsystem_api_t template)
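
fd_create_virtdevice() joins the series-wide NULL-to-ERR_PTR conversion: instead of collapsing every failure into NULL, the errno is encoded in the returned pointer, so the configfs enable path can hand the real reason back to userspace. The idiom from a hypothetical caller's side:

	struct se_device *dev = fd_create_virtdevice(hba, se_dev, p);
	if (IS_ERR(dev))
		return PTR_ERR(dev);	/* e.g. -ENOMEM, or getname()'s errno */
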
@@ -509,7 +514,7 @@ enum {
 static match_table_t tokens = {
 	{Opt_fd_dev_name, "fd_dev_name=%s"},
 	{Opt_fd_dev_size, "fd_dev_size=%s"},
-	{Opt_fd_buffered_io, "fd_buffered_id=%d"},
+	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
 	{Opt_err, NULL}
 };
 
@@ -536,15 +541,26 @@ static ssize_t fd_set_configfs_dev_params(
 		token = match_token(ptr, tokens, args);
 		switch (token) {
 		case Opt_fd_dev_name:
+			arg_p = match_strdup(&args[0]);
+			if (!arg_p) {
+				ret = -ENOMEM;
+				break;
+			}
 			snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
-					"%s", match_strdup(&args[0]));
+					"%s", arg_p);
+			kfree(arg_p);
 			printk(KERN_INFO "FILEIO: Referencing Path: %s\n",
 					fd_dev->fd_dev_name);
 			fd_dev->fbd_flags |= FBDF_HAS_PATH;
 			break;
 		case Opt_fd_dev_size:
 			arg_p = match_strdup(&args[0]);
+			if (!arg_p) {
+				ret = -ENOMEM;
+				break;
+			}
 			ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
+			kfree(arg_p);
 			if (ret < 0) {
 				printk(KERN_ERR "strict_strtoull() failed for"
 						" fd_dev_size=\n");
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 6ec51cbc018e..0b8f8da89019 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -151,19 +151,8 @@ out_free_hba:
 int
 core_delete_hba(struct se_hba *hba)
 {
-	struct se_device *dev, *dev_tmp;
-
-	spin_lock(&hba->device_lock);
-	list_for_each_entry_safe(dev, dev_tmp, &hba->hba_dev_list, dev_list) {
-
-		se_clear_dev_ports(dev);
-		spin_unlock(&hba->device_lock);
-
-		se_release_device_for_hba(dev);
-
-		spin_lock(&hba->device_lock);
-	}
-	spin_unlock(&hba->device_lock);
+	if (!list_empty(&hba->hba_dev_list))
+		dump_stack();
 
 	hba->transport->detach_hba(hba);
 
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index eb0afec046e1..86639004af9e 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -129,10 +129,11 @@ static struct se_device *iblock_create_virtdevice(
 	struct request_queue *q;
 	struct queue_limits *limits;
 	u32 dev_flags = 0;
+	int ret = -EINVAL;
 
 	if (!(ib_dev)) {
 		printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n");
-		return 0;
+		return ERR_PTR(ret);
 	}
 	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
 	/*
@@ -141,7 +142,7 @@ static struct se_device *iblock_create_virtdevice(
 	ib_dev->ibd_bio_set = bioset_create(32, 64);
 	if (!(ib_dev->ibd_bio_set)) {
 		printk(KERN_ERR "IBLOCK: Unable to create bioset()\n");
-		return 0;
+		return ERR_PTR(-ENOMEM);
 	}
 	printk(KERN_INFO "IBLOCK: Created bio_set()\n");
 	/*
@@ -153,8 +154,10 @@ static struct se_device *iblock_create_virtdevice(
 
 	bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
 				FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
-	if (IS_ERR(bd))
+	if (IS_ERR(bd)) {
+		ret = PTR_ERR(bd);
 		goto failed;
+	}
 	/*
 	 * Setup the local scope queue_limits from struct request_queue->limits
 	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
@@ -184,9 +187,7 @@ static struct se_device *iblock_create_virtdevice(
 	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
 	 * in ATA and we need to set TPE=1
 	 */
-	if (blk_queue_discard(bdev_get_queue(bd))) {
-		struct request_queue *q = bdev_get_queue(bd);
-
+	if (blk_queue_discard(q)) {
 		DEV_ATTRIB(dev)->max_unmap_lba_count =
 				q->limits.max_discard_sectors;
 		/*
@@ -212,7 +213,7 @@ failed:
 	ib_dev->ibd_bd = NULL;
 	ib_dev->ibd_major = 0;
 	ib_dev->ibd_minor = 0;
-	return NULL;
+	return ERR_PTR(ret);
 }
 
 static void iblock_free_device(void *p)
@@ -467,7 +468,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
 					       const char *page, ssize_t count)
 {
 	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
-	char *orig, *ptr, *opts;
+	char *orig, *ptr, *arg_p, *opts;
 	substring_t args[MAX_OPT_ARGS];
 	int ret = 0, arg, token;
 
@@ -490,9 +491,14 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
 				ret = -EEXIST;
 				goto out;
 			}
-
-			ret = snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
-				"%s", match_strdup(&args[0]));
+			arg_p = match_strdup(&args[0]);
+			if (!arg_p) {
+				ret = -ENOMEM;
+				break;
+			}
+			snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
+					"%s", arg_p);
+			kfree(arg_p);
 			printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n",
 					ib_dev->ibd_udev_path);
 			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
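
The match_strdup() fixes in both target_core_file.c and target_core_iblock.c close the same leak: match_strdup() returns a kmalloc()ed copy, which the old code passed straight into snprintf() (or strict_strtoull()) and then dropped. The corrected shape, with placeholder Opt_example/dst names:

	case Opt_example:
		arg_p = match_strdup(&args[0]);
		if (!arg_p) {
			ret = -ENOMEM;
			break;
		}
		snprintf(dst, sizeof(dst), "%s", arg_p);
		kfree(arg_p);		/* always free match_strdup()'s copy */
		break;

The Opt_fd_buffered_io hunk is an unrelated one-character fix: the match token string previously read "fd_buffered_id", so that option could never be parsed.
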
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 5a9d2ba4b609..7ff6a35f26ac 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -441,6 +441,7 @@ static struct se_device *pscsi_create_type_disk(
 	struct pscsi_dev_virt *pdv,
 	struct se_subsystem_dev *se_dev,
 	struct se_hba *hba)
+	__releases(sh->host_lock)
 {
 	struct se_device *dev;
 	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
@@ -488,6 +489,7 @@ static struct se_device *pscsi_create_type_rom(
 	struct pscsi_dev_virt *pdv,
 	struct se_subsystem_dev *se_dev,
 	struct se_hba *hba)
+	__releases(sh->host_lock)
 {
 	struct se_device *dev;
 	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
@@ -522,6 +524,7 @@ static struct se_device *pscsi_create_type_other(
 	struct pscsi_dev_virt *pdv,
 	struct se_subsystem_dev *se_dev,
 	struct se_hba *hba)
+	__releases(sh->host_lock)
 {
 	struct se_device *dev;
 	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
@@ -555,7 +558,7 @@ static struct se_device *pscsi_create_virtdevice(
 	if (!(pdv)) {
 		printk(KERN_ERR "Unable to locate struct pscsi_dev_virt"
 				" parameter\n");
-		return NULL;
+		return ERR_PTR(-EINVAL);
 	}
 	/*
 	 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
@@ -565,7 +568,7 @@ static struct se_device *pscsi_create_virtdevice(
 		if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
 			printk(KERN_ERR "pSCSI: Unable to locate struct"
 				" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
-			return NULL;
+			return ERR_PTR(-ENODEV);
 		}
 		/*
 		 * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device
@@ -574,7 +577,7 @@ static struct se_device *pscsi_create_virtdevice(
 		if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
 			printk(KERN_ERR "pSCSI: udev_path attribute has not"
 				" been set before ENABLE=1\n");
-			return NULL;
+			return ERR_PTR(-EINVAL);
 		}
 		/*
 		 * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID,
@@ -587,12 +590,12 @@ static struct se_device *pscsi_create_virtdevice(
 				printk(KERN_ERR "pSCSI: Unable to set hba_mode"
 					" with active devices\n");
 				spin_unlock(&hba->device_lock);
-				return NULL;
+				return ERR_PTR(-EEXIST);
 			}
 			spin_unlock(&hba->device_lock);
 
 			if (pscsi_pmode_enable_hba(hba, 1) != 1)
-				return NULL;
+				return ERR_PTR(-ENODEV);
 
 			legacy_mode_enable = 1;
 			hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
@@ -602,14 +605,14 @@ static struct se_device *pscsi_create_virtdevice(
 			if (!(sh)) {
 				printk(KERN_ERR "pSCSI: Unable to locate"
 					" pdv_host_id: %d\n", pdv->pdv_host_id);
-				return NULL;
+				return ERR_PTR(-ENODEV);
 			}
 		}
 	} else {
 		if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) {
 			printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while"
 				" struct Scsi_Host exists\n");
-			return NULL;
+			return ERR_PTR(-EEXIST);
 		}
 	}
 
@@ -644,7 +647,7 @@ static struct se_device *pscsi_create_virtdevice(
 				hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
 			}
 			pdv->pdv_sd = NULL;
-			return NULL;
+			return ERR_PTR(-ENODEV);
 		}
 		return dev;
 	}
@@ -660,7 +663,7 @@ static struct se_device *pscsi_create_virtdevice(
 		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
 	}
 
-	return NULL;
+	return ERR_PTR(-ENODEV);
 }
 
 /*	pscsi_free_device(): (Part of se_subsystem_api_t template)
@@ -816,6 +819,7 @@ pscsi_alloc_task(struct se_cmd *cmd)
 		if (!(pt->pscsi_cdb)) {
 			printk(KERN_ERR "pSCSI: Unable to allocate extended"
 					" pt->pscsi_cdb\n");
+			kfree(pt);
 			return NULL;
 		}
 	} else
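
The pscsi_alloc_task() change plugs a leak on the extended-CDB path: when the second allocation fails, the task object allocated just before it has to be unwound as well. The general shape, with placeholder names:

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;
	obj->buf = kzalloc(len, GFP_KERNEL);
	if (!obj->buf) {
		kfree(obj);	/* unwind the earlier allocation */
		return NULL;
	}
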
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 8dc6d74c1d40..7837dd365a9d 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -150,7 +150,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
 	if (rd_dev->rd_page_count <= 0) {
 		printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
 			rd_dev->rd_page_count);
-		return -1;
+		return -EINVAL;
 	}
 	total_sg_needed = rd_dev->rd_page_count;
 
@@ -160,7 +160,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
 	if (!(sg_table)) {
 		printk(KERN_ERR "Unable to allocate memory for Ramdisk"
 			" scatterlist tables\n");
-		return -1;
+		return -ENOMEM;
 	}
 
 	rd_dev->sg_table_array = sg_table;
@@ -175,7 +175,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
 		if (!(sg)) {
 			printk(KERN_ERR "Unable to allocate scatterlist array"
 				" for struct rd_dev\n");
-			return -1;
+			return -ENOMEM;
 		}
 
 		sg_init_table((struct scatterlist *)&sg[0], sg_per_table);
@@ -191,7 +191,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
 			if (!(pg)) {
 				printk(KERN_ERR "Unable to allocate scatterlist"
 					" pages for struct rd_dev_sg_table\n");
-				return -1;
+				return -ENOMEM;
 			}
 			sg_assign_page(&sg[j], pg);
 			sg[j].length = PAGE_SIZE;
@@ -253,12 +253,13 @@ static struct se_device *rd_create_virtdevice(
 	struct se_dev_limits dev_limits;
 	struct rd_dev *rd_dev = p;
 	struct rd_host *rd_host = hba->hba_ptr;
-	int dev_flags = 0;
+	int dev_flags = 0, ret;
 	char prod[16], rev[4];
 
 	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
 
-	if (rd_build_device_space(rd_dev) < 0)
+	ret = rd_build_device_space(rd_dev);
+	if (ret < 0)
 		goto fail;
 
 	snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
@@ -292,7 +293,7 @@ static struct se_device *rd_create_virtdevice(
 
 fail:
 	rd_release_device_space(rd_dev);
-	return NULL;
+	return ERR_PTR(ret);
 }
 
 static struct se_device *rd_DIRECT_create_virtdevice(
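
target_core_rd.c completes the same conversion from bare -1 returns to real errno values: whichever code rd_build_device_space() picks (-EINVAL for a bad page count, -ENOMEM for failed allocations) now survives intact out to the caller. Sketched end to end:

	ret = rd_build_device_space(rd_dev);	/* -EINVAL or -ENOMEM */
	if (ret < 0) {
		rd_release_device_space(rd_dev);
		return ERR_PTR(ret);	/* configfs mkdir(2) then fails with that errno */
	}
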
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 13badfbaf9c0..3ea19e29d8ec 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -14,8 +14,6 @@
 #define RD_BLOCKSIZE		512
 #define RD_MAX_SECTORS		1024
 
-extern struct kmem_cache *se_mem_cache;
-
 /* Used in target_core_init_configfs() for virtual LUN 0 access */
 int __init rd_module_init(void);
 void rd_module_exit(void);
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
new file mode 100644
index 000000000000..5e3a067a7475
--- /dev/null
+++ b/drivers/target/target_core_stat.c
@@ -0,0 +1,1810 @@
+/*******************************************************************************
+ * Filename:  target_core_stat.c
+ *
+ * Copyright (c) 2011 Rising Tide Systems
+ * Copyright (c) 2011 Linux-iSCSI.org
+ *
+ * Modern ConfigFS group context specific statistics based on original
+ * target_core_mib.c code
+ *
+ * Copyright (c) 2006-2007 SBE, Inc.  All Rights Reserved.
+ *
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/blkdev.h>
+#include <linux/configfs.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "target_core_hba.h"
+
+#ifndef INITIAL_JIFFIES
+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
+#endif
+
+#define NONE		"None"
+#define ISPRINT(a)   ((a >= ' ') && (a <= '~'))
+
+#define SCSI_LU_INDEX			1
+#define LU_COUNT			1
+
+/*
+ * SCSI Device Table
+ */
+
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_dev, se_dev_stat_grps);
+#define DEV_STAT_SCSI_DEV_ATTR(_name, _mode)				\
+static struct target_stat_scsi_dev_attribute				\
+			target_stat_scsi_dev_##_name =			\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_stat_scsi_dev_show_attr_##_name,				\
+	target_stat_scsi_dev_store_attr_##_name);
+
+#define DEV_STAT_SCSI_DEV_ATTR_RO(_name)				\
+static struct target_stat_scsi_dev_attribute				\
+			target_stat_scsi_dev_##_name =			\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_stat_scsi_dev_show_attr_##_name);
+
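+/*
+ * The two macros above build on target/configfs_macros.h.  Roughly,
+ * CONFIGFS_EATTR_STRUCT(target_stat_scsi_dev, se_dev_stat_grps) emits a
+ * wrapper type along these lines (a sketch, not the literal expansion):
+ *
+ *	struct target_stat_scsi_dev_attribute {
+ *		struct configfs_attribute attr;
+ *		ssize_t (*show)(struct se_dev_stat_grps *, char *);
+ *		ssize_t (*store)(struct se_dev_stat_grps *, const char *,
+ *				 size_t);
+ *	};
+ *
+ * and CONFIGFS_EATTR_OPS() further down emits the attr_show/attr_store
+ * dispatchers that container_of() from the config_item back to the
+ * se_dev_stat_grps before calling the per-attribute handlers.
+ */
+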
+static ssize_t target_stat_scsi_dev_show_attr_inst(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_hba *hba = se_subdev->se_dev_hba;
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
+}
+DEV_STAT_SCSI_DEV_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_dev_show_attr_indx(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+}
+DEV_STAT_SCSI_DEV_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_dev_show_attr_role(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	return snprintf(page, PAGE_SIZE, "Target\n");
+}
+DEV_STAT_SCSI_DEV_ATTR_RO(role);
+
+static ssize_t target_stat_scsi_dev_show_attr_ports(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count);
+}
+DEV_STAT_SCSI_DEV_ATTR_RO(ports);
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_dev, se_dev_stat_grps, scsi_dev_group);
+
+static struct configfs_attribute *target_stat_scsi_dev_attrs[] = {
+	&target_stat_scsi_dev_inst.attr,
+	&target_stat_scsi_dev_indx.attr,
+	&target_stat_scsi_dev_role.attr,
+	&target_stat_scsi_dev_ports.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_dev_attrib_ops = {
+	.show_attribute		= target_stat_scsi_dev_attr_show,
+	.store_attribute	= target_stat_scsi_dev_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_dev_cit = {
+	.ct_item_ops		= &target_stat_scsi_dev_attrib_ops,
+	.ct_attrs		= target_stat_scsi_dev_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * SCSI Target Device Table
+ */
+
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_tgt_dev, se_dev_stat_grps);
+#define DEV_STAT_SCSI_TGT_DEV_ATTR(_name, _mode)			\
+static struct target_stat_scsi_tgt_dev_attribute			\
+			target_stat_scsi_tgt_dev_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_stat_scsi_tgt_dev_show_attr_##_name,			\
+	target_stat_scsi_tgt_dev_store_attr_##_name);
+
+#define DEV_STAT_SCSI_TGT_DEV_ATTR_RO(_name)				\
+static struct target_stat_scsi_tgt_dev_attribute			\
+			target_stat_scsi_tgt_dev_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_stat_scsi_tgt_dev_show_attr_##_name);
+
+static ssize_t target_stat_scsi_tgt_dev_show_attr_inst(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_hba *hba = se_subdev->se_dev_hba;
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
+}
+DEV_STAT_SCSI_TGT_DEV_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_tgt_dev_show_attr_indx(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+}
+DEV_STAT_SCSI_TGT_DEV_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_tgt_dev_show_attr_num_lus(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT);
+}
+DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus);
+
+static ssize_t target_stat_scsi_tgt_dev_show_attr_status(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+	char status[16];
+
+	if (!dev)
+		return -ENODEV;
+
+	switch (dev->dev_status) {
+	case TRANSPORT_DEVICE_ACTIVATED:
+		strcpy(status, "activated");
+		break;
+	case TRANSPORT_DEVICE_DEACTIVATED:
+		strcpy(status, "deactivated");
+		break;
+	case TRANSPORT_DEVICE_SHUTDOWN:
+		strcpy(status, "shutdown");
+		break;
+	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
+	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
+		strcpy(status, "offline");
+		break;
+	default:
+		sprintf(status, "unknown(%d)", dev->dev_status);
+		break;
+	}
+
+	return snprintf(page, PAGE_SIZE, "%s\n", status);
+}
+DEV_STAT_SCSI_TGT_DEV_ATTR_RO(status);
+
+static ssize_t target_stat_scsi_tgt_dev_show_attr_non_access_lus(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+	int non_accessible_lus;
+
+	if (!dev)
+		return -ENODEV;
+
+	switch (dev->dev_status) {
+	case TRANSPORT_DEVICE_ACTIVATED:
+		non_accessible_lus = 0;
+		break;
+	case TRANSPORT_DEVICE_DEACTIVATED:
+	case TRANSPORT_DEVICE_SHUTDOWN:
+	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
+	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
+	default:
+		non_accessible_lus = 1;
+		break;
+	}
+
+	return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus);
+}
+DEV_STAT_SCSI_TGT_DEV_ATTR_RO(non_access_lus);
+
+static ssize_t target_stat_scsi_tgt_dev_show_attr_resets(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
+}
+DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets);
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_tgt_dev, se_dev_stat_grps, scsi_tgt_dev_group);
+
+static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = {
+	&target_stat_scsi_tgt_dev_inst.attr,
+	&target_stat_scsi_tgt_dev_indx.attr,
+	&target_stat_scsi_tgt_dev_num_lus.attr,
+	&target_stat_scsi_tgt_dev_status.attr,
+	&target_stat_scsi_tgt_dev_non_access_lus.attr,
+	&target_stat_scsi_tgt_dev_resets.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_tgt_dev_attrib_ops = {
+	.show_attribute		= target_stat_scsi_tgt_dev_attr_show,
+	.store_attribute	= target_stat_scsi_tgt_dev_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_tgt_dev_cit = {
+	.ct_item_ops		= &target_stat_scsi_tgt_dev_attrib_ops,
+	.ct_attrs		= target_stat_scsi_tgt_dev_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * SCSI Logical Unit Table
+ */
+
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_lu, se_dev_stat_grps);
+#define DEV_STAT_SCSI_LU_ATTR(_name, _mode)				\
+static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_stat_scsi_lu_show_attr_##_name,				\
+	target_stat_scsi_lu_store_attr_##_name);
+
+#define DEV_STAT_SCSI_LU_ATTR_RO(_name)					\
+static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_stat_scsi_lu_show_attr_##_name);
+
+static ssize_t target_stat_scsi_lu_show_attr_inst(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_hba *hba = se_subdev->se_dev_hba;
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_lu_show_attr_dev(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(dev);
+
+static ssize_t target_stat_scsi_lu_show_attr_indx(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_lu_show_attr_lun(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+	/* FIXME: scsiLuDefaultLun */
+	return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(lun);
+
+static ssize_t target_stat_scsi_lu_show_attr_lu_name(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+	/* scsiLuWwnName */
+	return snprintf(page, PAGE_SIZE, "%s\n",
+			(strlen(DEV_T10_WWN(dev)->unit_serial)) ?
+			(char *)&DEV_T10_WWN(dev)->unit_serial[0] : "None");
+}
+DEV_STAT_SCSI_LU_ATTR_RO(lu_name);
+
+static ssize_t target_stat_scsi_lu_show_attr_vend(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+	int j;
+	char str[28];
+
+	if (!dev)
+		return -ENODEV;
+	/* scsiLuVendorId */
+	memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
+	for (j = 0; j < 8; j++)
+		str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ?
+				DEV_T10_WWN(dev)->vendor[j] : 0x20;
+	str[8] = 0;
+	return snprintf(page, PAGE_SIZE, "%s\n", str);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(vend);
+
+static ssize_t target_stat_scsi_lu_show_attr_prod(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+	int j;
+	char str[28];
+
+	if (!dev)
+		return -ENODEV;
+
+	/* scsiLuProductId */
+	memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
+	for (j = 0; j < 16; j++)
+		str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ?
+				DEV_T10_WWN(dev)->model[j] : 0x20;
+	str[16] = 0;
+	return snprintf(page, PAGE_SIZE, "%s\n", str);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(prod);
+
+static ssize_t target_stat_scsi_lu_show_attr_rev(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+	int j;
+	char str[28];
+
+	if (!dev)
+		return -ENODEV;
+
+	/* scsiLuRevisionId */
+	memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
+	for (j = 0; j < 4; j++)
+		str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ?
+				DEV_T10_WWN(dev)->revision[j] : 0x20;
+	str[4] = 0;
+	return snprintf(page, PAGE_SIZE, "%s\n", str);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(rev);
+
+static ssize_t target_stat_scsi_lu_show_attr_dev_type(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* scsiLuPeripheralType */
+	return snprintf(page, PAGE_SIZE, "%u\n",
+			TRANSPORT(dev)->get_device_type(dev));
+}
+DEV_STAT_SCSI_LU_ATTR_RO(dev_type);
+
+static ssize_t target_stat_scsi_lu_show_attr_status(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* scsiLuStatus */
+	return snprintf(page, PAGE_SIZE, "%s\n",
+		(dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
+		"available" : "notavailable");
+}
+DEV_STAT_SCSI_LU_ATTR_RO(status);
+
+static ssize_t target_stat_scsi_lu_show_attr_state_bit(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* scsiLuState */
+	return snprintf(page, PAGE_SIZE, "exposed\n");
+}
+DEV_STAT_SCSI_LU_ATTR_RO(state_bit);
+
+static ssize_t target_stat_scsi_lu_show_attr_num_cmds(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* scsiLuNumCommands */
+	return snprintf(page, PAGE_SIZE, "%llu\n",
+			(unsigned long long)dev->num_cmds);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(num_cmds);
+
+static ssize_t target_stat_scsi_lu_show_attr_read_mbytes(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* scsiLuReadMegaBytes */
+	return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20));
+}
+DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes);
+
+static ssize_t target_stat_scsi_lu_show_attr_write_mbytes(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* scsiLuWrittenMegaBytes */
+	return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20));
+}
+DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes);
+
+static ssize_t target_stat_scsi_lu_show_attr_resets(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* scsiLuInResets */
+	return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(resets);
+
+static ssize_t target_stat_scsi_lu_show_attr_full_stat(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* FIXME: scsiLuOutTaskSetFullStatus */
+	return snprintf(page, PAGE_SIZE, "%u\n", 0);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(full_stat);
+
+static ssize_t target_stat_scsi_lu_show_attr_hs_num_cmds(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* FIXME: scsiLuHSInCommands */
+	return snprintf(page, PAGE_SIZE, "%u\n", 0);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(hs_num_cmds);
+
+static ssize_t target_stat_scsi_lu_show_attr_creation_time(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* scsiLuCreationTime */
+	return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time -
+				INITIAL_JIFFIES) * 100 / HZ));
+}
+DEV_STAT_SCSI_LU_ATTR_RO(creation_time);
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_lu, se_dev_stat_grps, scsi_lu_group);
+
+static struct configfs_attribute *target_stat_scsi_lu_attrs[] = {
+	&target_stat_scsi_lu_inst.attr,
+	&target_stat_scsi_lu_dev.attr,
+	&target_stat_scsi_lu_indx.attr,
+	&target_stat_scsi_lu_lun.attr,
+	&target_stat_scsi_lu_lu_name.attr,
+	&target_stat_scsi_lu_vend.attr,
+	&target_stat_scsi_lu_prod.attr,
+	&target_stat_scsi_lu_rev.attr,
+	&target_stat_scsi_lu_dev_type.attr,
+	&target_stat_scsi_lu_status.attr,
+	&target_stat_scsi_lu_state_bit.attr,
+	&target_stat_scsi_lu_num_cmds.attr,
+	&target_stat_scsi_lu_read_mbytes.attr,
+	&target_stat_scsi_lu_write_mbytes.attr,
+	&target_stat_scsi_lu_resets.attr,
+	&target_stat_scsi_lu_full_stat.attr,
+	&target_stat_scsi_lu_hs_num_cmds.attr,
+	&target_stat_scsi_lu_creation_time.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_lu_attrib_ops = {
+	.show_attribute		= target_stat_scsi_lu_attr_show,
+	.store_attribute	= target_stat_scsi_lu_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_lu_cit = {
+	.ct_item_ops		= &target_stat_scsi_lu_attrib_ops,
+	.ct_attrs		= target_stat_scsi_lu_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * Called from target_core_configfs.c:target_core_make_subdev() to setup
+ * the target statistics groups + configfs CITs located in target_core_stat.c
+ */
+void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev)
+{
+	struct config_group *dev_stat_grp = &DEV_STAT_GRP(se_subdev)->stat_group;
+
+	config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_dev_group,
+			"scsi_dev", &target_stat_scsi_dev_cit);
+	config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group,
+			"scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit);
+	config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_lu_group,
+			"scsi_lu", &target_stat_scsi_lu_cit);
+
+	dev_stat_grp->default_groups[0] = &DEV_STAT_GRP(se_subdev)->scsi_dev_group;
+	dev_stat_grp->default_groups[1] = &DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group;
+	dev_stat_grp->default_groups[2] = &DEV_STAT_GRP(se_subdev)->scsi_lu_group;
+	dev_stat_grp->default_groups[3] = NULL;
+}
+
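+/*
+ * With the wiring above, the per-device statistics appear in configfs as
+ * (paths illustrative, relative to /sys/kernel/config):
+ *
+ *	target/core/$HBA/$DEV/statistics/scsi_dev/
+ *	target/core/$HBA/$DEV/statistics/scsi_tgt_dev/
+ *	target/core/$HBA/$DEV/statistics/scsi_lu/
+ *
+ * The caller, target_core_make_subdev(), sizes stat_group->default_groups
+ * for four entries: the three groups filled in here plus the NULL
+ * terminator.
+ */
+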
+/*
+ * SCSI Port Table
+ */
+
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_port, se_port_stat_grps);
+#define DEV_STAT_SCSI_PORT_ATTR(_name, _mode)				\
+static struct target_stat_scsi_port_attribute				\
+			target_stat_scsi_port_##_name =			\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_stat_scsi_port_show_attr_##_name,			\
+	target_stat_scsi_port_store_attr_##_name);
+
+#define DEV_STAT_SCSI_PORT_ATTR_RO(_name)				\
+static struct target_stat_scsi_port_attribute				\
+			target_stat_scsi_port_##_name =			\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_stat_scsi_port_show_attr_##_name);
+
+static ssize_t target_stat_scsi_port_show_attr_inst(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	struct se_device *dev = lun->lun_se_dev;
+	struct se_hba *hba;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	hba = dev->se_hba;
+	ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_PORT_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_port_show_attr_dev(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	struct se_device *dev = lun->lun_se_dev;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_PORT_ATTR_RO(dev);
+
+static ssize_t target_stat_scsi_port_show_attr_indx(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_PORT_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_port_show_attr_role(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_device *dev = lun->lun_se_dev;
+	struct se_port *sep;
+	ssize_t ret;
+
+	if (!dev)
+		return -ENODEV;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_PORT_ATTR_RO(role);
+
+static ssize_t target_stat_scsi_port_show_attr_busy_count(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	/* FIXME: scsiPortBusyStatuses  */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_PORT_ATTR_RO(busy_count);
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_port, se_port_stat_grps, scsi_port_group);
+
+static struct configfs_attribute *target_stat_scsi_port_attrs[] = {
+	&target_stat_scsi_port_inst.attr,
+	&target_stat_scsi_port_dev.attr,
+	&target_stat_scsi_port_indx.attr,
+	&target_stat_scsi_port_role.attr,
+	&target_stat_scsi_port_busy_count.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_port_attrib_ops = {
+	.show_attribute		= target_stat_scsi_port_attr_show,
+	.store_attribute	= target_stat_scsi_port_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_port_cit = {
+	.ct_item_ops		= &target_stat_scsi_port_attrib_ops,
+	.ct_attrs		= target_stat_scsi_port_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * SCSI Target Port Table
+ */
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_tgt_port, se_port_stat_grps);
+#define DEV_STAT_SCSI_TGT_PORT_ATTR(_name, _mode)			\
+static struct target_stat_scsi_tgt_port_attribute			\
+			target_stat_scsi_tgt_port_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_stat_scsi_tgt_port_show_attr_##_name,			\
+	target_stat_scsi_tgt_port_store_attr_##_name);
+
+#define DEV_STAT_SCSI_TGT_PORT_ATTR_RO(_name)				\
+static struct target_stat_scsi_tgt_port_attribute			\
+			target_stat_scsi_tgt_port_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_stat_scsi_tgt_port_show_attr_##_name);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_inst(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_device *dev = lun->lun_se_dev;
+	struct se_port *sep;
+	struct se_hba *hba;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	hba = dev->se_hba;
+	ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_dev(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_device *dev = lun->lun_se_dev;
+	struct se_port *sep;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(dev);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_indx(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_name(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	tpg = sep->sep_tpg;
+
+	ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
+		TPG_TFO(tpg)->get_fabric_name(), sep->sep_index);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(name);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_port_index(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	tpg = sep->sep_tpg;
+
+	ret = snprintf(page, PAGE_SIZE, "%s%s%d\n",
+		TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+",
+		TPG_TFO(tpg)->tpg_get_tag(tpg));
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(port_index);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_in_cmds(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	tpg = sep->sep_tpg;
+
+	ret = snprintf(page, PAGE_SIZE, "%llu\n", sep->sep_stats.cmd_pdus);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(in_cmds);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_write_mbytes(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	tpg = sep->sep_tpg;
+
+	ret = snprintf(page, PAGE_SIZE, "%u\n",
+			(u32)(sep->sep_stats.rx_data_octets >> 20));
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(write_mbytes);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_read_mbytes(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	tpg = sep->sep_tpg;
+
+	ret = snprintf(page, PAGE_SIZE, "%u\n",
+			(u32)(sep->sep_stats.tx_data_octets >> 20));
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(read_mbytes);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_hs_in_cmds(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	tpg = sep->sep_tpg;
+
+	/* FIXME: scsiTgtPortHsInCommands */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(hs_in_cmds);
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_tgt_port, se_port_stat_grps,
+		scsi_tgt_port_group);
+
+static struct configfs_attribute *target_stat_scsi_tgt_port_attrs[] = {
+	&target_stat_scsi_tgt_port_inst.attr,
+	&target_stat_scsi_tgt_port_dev.attr,
+	&target_stat_scsi_tgt_port_indx.attr,
+	&target_stat_scsi_tgt_port_name.attr,
+	&target_stat_scsi_tgt_port_port_index.attr,
+	&target_stat_scsi_tgt_port_in_cmds.attr,
+	&target_stat_scsi_tgt_port_write_mbytes.attr,
+	&target_stat_scsi_tgt_port_read_mbytes.attr,
+	&target_stat_scsi_tgt_port_hs_in_cmds.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_tgt_port_attrib_ops = {
+	.show_attribute		= target_stat_scsi_tgt_port_attr_show,
+	.store_attribute	= target_stat_scsi_tgt_port_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_tgt_port_cit = {
+	.ct_item_ops		= &target_stat_scsi_tgt_port_attrib_ops,
+	.ct_attrs		= target_stat_scsi_tgt_port_attrs,
+	.ct_owner		= THIS_MODULE,
+};
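+
+/*
+ * Illustrative output for this group (an assumption, for an iSCSI fabric
+ * whose get_fabric_name() returns "iSCSI", sep_index 3, WWN
+ * iqn.2003-01.org.linux-iscsi.target, TPGT 1):
+ *
+ *   # cat .../statistics/scsi_tgt_port/name
+ *   iSCSIPort#3
+ *   # cat .../statistics/scsi_tgt_port/port_index
+ *   iqn.2003-01.org.linux-iscsi.target+t+1
+ */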
+
+/*
+ * SCSI Transport Table
+ */
+
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_transport, se_port_stat_grps);
+#define DEV_STAT_SCSI_TRANSPORT_ATTR(_name, _mode)			\
+static struct target_stat_scsi_transport_attribute			\
+			target_stat_scsi_transport_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_stat_scsi_transport_show_attr_##_name,			\
+	target_stat_scsi_transport_store_attr_##_name);
+
+#define DEV_STAT_SCSI_TRANSPORT_ATTR_RO(_name)				\
+static struct target_stat_scsi_transport_attribute			\
+			target_stat_scsi_transport_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_stat_scsi_transport_show_attr_##_name);
+
+static ssize_t target_stat_scsi_transport_show_attr_inst(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_device *dev = lun->lun_se_dev;
+	struct se_port *sep;
+	struct se_hba *hba;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+
+	hba = dev->se_hba;
+	ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TRANSPORT_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_transport_show_attr_device(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	tpg = sep->sep_tpg;
+	/* scsiTransportType */
+	ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
+			TPG_TFO(tpg)->get_fabric_name());
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TRANSPORT_ATTR_RO(device);
+
+static ssize_t target_stat_scsi_transport_show_attr_indx(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	tpg = sep->sep_tpg;
+	ret = snprintf(page, PAGE_SIZE, "%u\n",
+			TPG_TFO(tpg)->tpg_get_inst_index(tpg));
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TRANSPORT_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_transport_show_attr_dev_name(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_device *dev = lun->lun_se_dev;
+	struct se_port *sep;
+	struct se_portal_group *tpg;
+	struct t10_wwn *wwn;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	tpg = sep->sep_tpg;
+	wwn = DEV_T10_WWN(dev);
+	/* scsiTransportDevName */
+	ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
+			TPG_TFO(tpg)->tpg_get_wwn(tpg),
+			(strlen(wwn->unit_serial)) ? wwn->unit_serial :
+			wwn->vendor);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TRANSPORT_ATTR_RO(dev_name);
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_transport, se_port_stat_grps,
+		scsi_transport_group);
+
+static struct configfs_attribute *target_stat_scsi_transport_attrs[] = {
+	&target_stat_scsi_transport_inst.attr,
+	&target_stat_scsi_transport_device.attr,
+	&target_stat_scsi_transport_indx.attr,
+	&target_stat_scsi_transport_dev_name.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_transport_attrib_ops = {
+	.show_attribute		= target_stat_scsi_transport_attr_show,
+	.store_attribute	= target_stat_scsi_transport_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_transport_cit = {
+	.ct_item_ops		= &target_stat_scsi_transport_attrib_ops,
+	.ct_attrs		= target_stat_scsi_transport_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * Called from target_core_fabric_configfs.c:target_fabric_make_lun() to set up
+ * the target port statistics groups + configfs CITs located in target_core_stat.c
+ */
+void target_stat_setup_port_default_groups(struct se_lun *lun)
+{
+	struct config_group *port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
+
+	config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_port_group,
+			"scsi_port", &target_stat_scsi_port_cit);
+	config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_tgt_port_group,
+			"scsi_tgt_port", &target_stat_scsi_tgt_port_cit);
+	config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_transport_group,
+			"scsi_transport", &target_stat_scsi_transport_cit);
+
+	port_stat_grp->default_groups[0] = &PORT_STAT_GRP(lun)->scsi_port_group;
+	port_stat_grp->default_groups[1] = &PORT_STAT_GRP(lun)->scsi_tgt_port_group;
+	port_stat_grp->default_groups[2] = &PORT_STAT_GRP(lun)->scsi_transport_group;
+	port_stat_grp->default_groups[3] = NULL;
+}
+
+/*
+ * SCSI Authorized Initiator Table
+ */
+
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_auth_intr, se_ml_stat_grps);
+#define DEV_STAT_SCSI_AUTH_INTR_ATTR(_name, _mode)			\
+static struct target_stat_scsi_auth_intr_attribute			\
+			target_stat_scsi_auth_intr_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_stat_scsi_auth_intr_show_attr_##_name,			\
+	target_stat_scsi_auth_intr_store_attr_##_name);
+
+#define DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(_name)				\
+static struct target_stat_scsi_auth_intr_attribute			\
+			target_stat_scsi_auth_intr_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_stat_scsi_auth_intr_show_attr_##_name);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_inst(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	tpg = nacl->se_tpg;
+	/* scsiInstIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n",
+			TPG_TFO(tpg)->tpg_get_inst_index(tpg));
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_dev(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	tpg = nacl->se_tpg;
+	lun = deve->se_lun;
+	/* scsiDeviceIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index);
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_port(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	tpg = nacl->se_tpg;
+	/* scsiAuthIntrTgtPortIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg));
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(port);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_indx(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* scsiAuthIntrIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_dev_or_port(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* scsiAuthIntrDevOrPort */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", 1);
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev_or_port);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_intr_name(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* scsiAuthIntrName */
+	ret = snprintf(page, PAGE_SIZE, "%s\n", nacl->initiatorname);
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(intr_name);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_map_indx(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* FIXME: scsiAuthIntrLunMapIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(map_indx);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_att_count(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* scsiAuthIntrAttachedTimes */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", deve->attach_count);
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(att_count);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_num_cmds(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* scsiAuthIntrOutCommands */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", deve->total_cmds);
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(num_cmds);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_read_mbytes(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* scsiAuthIntrReadMegaBytes */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->read_bytes >> 20));
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(read_mbytes);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_write_mbytes(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* scsiAuthIntrWrittenMegaBytes */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->write_bytes >> 20));
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(write_mbytes);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_hs_num_cmds(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* FIXME: scsiAuthIntrHSOutCommands */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(hs_num_cmds);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_creation_time(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* scsiAuthIntrLastCreation */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)deve->creation_time -
+				INITIAL_JIFFIES) * 100 / HZ));
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(creation_time);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_row_status(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* FIXME: scsiAuthIntrRowStatus */
+	ret = snprintf(page, PAGE_SIZE, "Ready\n");
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(row_status);
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_auth_intr, se_ml_stat_grps,
+		scsi_auth_intr_group);
+
+static struct configfs_attribute *target_stat_scsi_auth_intr_attrs[] = {
+	&target_stat_scsi_auth_intr_inst.attr,
+	&target_stat_scsi_auth_intr_dev.attr,
+	&target_stat_scsi_auth_intr_port.attr,
+	&target_stat_scsi_auth_intr_indx.attr,
+	&target_stat_scsi_auth_intr_dev_or_port.attr,
+	&target_stat_scsi_auth_intr_intr_name.attr,
+	&target_stat_scsi_auth_intr_map_indx.attr,
+	&target_stat_scsi_auth_intr_att_count.attr,
+	&target_stat_scsi_auth_intr_num_cmds.attr,
+	&target_stat_scsi_auth_intr_read_mbytes.attr,
+	&target_stat_scsi_auth_intr_write_mbytes.attr,
+	&target_stat_scsi_auth_intr_hs_num_cmds.attr,
+	&target_stat_scsi_auth_intr_creation_time.attr,
+	&target_stat_scsi_auth_intr_row_status.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_auth_intr_attrib_ops = {
+	.show_attribute		= target_stat_scsi_auth_intr_attr_show,
+	.store_attribute	= target_stat_scsi_auth_intr_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_auth_intr_cit = {
+	.ct_item_ops		= &target_stat_scsi_auth_intr_attrib_ops,
+	.ct_attrs		= target_stat_scsi_auth_intr_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * SCSI Attached Initiator Port Table
+ */
+
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_att_intr_port, se_ml_stat_grps);
+#define DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR(_name, _mode)			\
+static struct target_stat_scsi_att_intr_port_attribute			\
+		target_stat_scsi_att_intr_port_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_stat_scsi_att_intr_port_show_attr_##_name,		\
+	target_stat_scsi_att_intr_port_store_attr_##_name);
+
+#define DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(_name)			\
+static struct target_stat_scsi_att_intr_port_attribute			\
+		target_stat_scsi_att_intr_port_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_stat_scsi_att_intr_port_show_attr_##_name);
+
+static ssize_t target_stat_scsi_att_intr_port_show_attr_inst(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	tpg = nacl->se_tpg;
+	/* scsiInstIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n",
+			TPG_TFO(tpg)->tpg_get_inst_index(tpg));
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_att_intr_port_show_attr_dev(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	tpg = nacl->se_tpg;
+	lun = deve->se_lun;
+	/* scsiDeviceIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index);
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(dev);
+
+static ssize_t target_stat_scsi_att_intr_port_show_attr_port(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	tpg = nacl->se_tpg;
+	/* scsiPortIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg));
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port);
+
+static ssize_t target_stat_scsi_att_intr_port_show_attr_indx(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_session *se_sess;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->nacl_sess_lock);
+	se_sess = nacl->nacl_sess;
+	if (!se_sess) {
+		spin_unlock_irq(&nacl->nacl_sess_lock);
+		return -ENODEV;
+	}
+
+	tpg = nacl->se_tpg;
+	/* scsiAttIntrPortIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n",
+			TPG_TFO(tpg)->sess_get_index(se_sess));
+	spin_unlock_irq(&nacl->nacl_sess_lock);
+	return ret;
+}
+DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_att_intr_port_show_attr_port_auth_indx(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* scsiAttIntrPortAuthIntrIdx */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_auth_indx);
+
+static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_session *se_sess;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+	unsigned char buf[64];
+
+	spin_lock_irq(&nacl->nacl_sess_lock);
+	se_sess = nacl->nacl_sess;
+	if (!se_sess) {
+		spin_unlock_irq(&nacl->nacl_sess_lock);
+		return -ENODEV;
+	}
+
+	tpg = nacl->se_tpg;
+	/* scsiAttIntrPortName+scsiAttIntrPortIdentifier */
+	memset(buf, 0, 64);
+	if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL)
+		TPG_TFO(tpg)->sess_get_initiator_sid(se_sess,
+				(unsigned char *)&buf[0], 64);
+
+	ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf);
+	spin_unlock_irq(&nacl->nacl_sess_lock);
+	return ret;
+}
+DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_ident);
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_att_intr_port, se_ml_stat_grps,
+		scsi_att_intr_port_group);
+
+static struct configfs_attribute *target_stat_scsi_att_intr_port_attrs[] = {
+	&target_stat_scsi_att_intr_port_inst.attr,
+	&target_stat_scsi_att_intr_port_dev.attr,
+	&target_stat_scsi_att_intr_port_port.attr,
+	&target_stat_scsi_att_intr_port_indx.attr,
+	&target_stat_scsi_att_intr_port_port_auth_indx.attr,
+	&target_stat_scsi_att_intr_port_port_ident.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_att_intr_port_attrib_ops = {
+	.show_attribute		= target_stat_scsi_att_intr_port_attr_show,
+	.store_attribute	= target_stat_scsi_att_intr_port_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_att_intr_port_cit = {
+	.ct_item_ops		= &target_stat_scsi_att_intr_port_attrib_ops,
+	.ct_attrs		= target_stat_scsi_att_intr_port_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * Called from target_core_fabric_configfs.c:target_fabric_make_mappedlun() to set up
+ * the target MappedLUN statistics groups + configfs CITs located in target_core_stat.c
+ */
+void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl)
+{
+	struct config_group *ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
+
+	config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_auth_intr_group,
+			"scsi_auth_intr", &target_stat_scsi_auth_intr_cit);
+	config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_att_intr_port_group,
+			"scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit);
+
+	ml_stat_grp->default_groups[0] = &ML_STAT_GRPS(lacl)->scsi_auth_intr_group;
+	ml_stat_grp->default_groups[1] = &ML_STAT_GRPS(lacl)->scsi_att_intr_port_group;
+	ml_stat_grp->default_groups[2] = NULL;
+}
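
Once a LUN is exported, the three groups wired up above surface as ordinary
configfs directories, so the counters can be read with plain file I/O. A
minimal userspace sketch (the fabric name, WWN, TPGT and "statistics" path
components are assumptions for illustration, not defined by this patch):

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical layout; substitute the real fabric/WWN/TPGT/LUN. */
		const char *path = "/sys/kernel/config/target/iscsi/"
			"iqn.2003-01.org.linux-iscsi.target/tpgt_1"
			"/lun/lun_0/statistics/scsi_tgt_port/in_cmds";
		char buf[64];
		FILE *f = fopen(path, "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("scsiTgtPortInCommands: %s", buf);
		fclose(f);
		return 0;
	}
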
diff --git a/drivers/target/target_core_stat.h b/drivers/target/target_core_stat.h
new file mode 100644
index 000000000000..86c252f9ea47
--- /dev/null
+++ b/drivers/target/target_core_stat.h
@@ -0,0 +1,8 @@
+#ifndef TARGET_CORE_STAT_H
+#define TARGET_CORE_STAT_H
+
+extern void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
+extern void target_stat_setup_port_default_groups(struct se_lun *);
+extern void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
+
+#endif   /*** TARGET_CORE_STAT_H ***/
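
The new header relies on its includers already having these struct
definitions in scope; since it only takes pointers, explicit forward
declarations would make it self-contained. A hedged sketch of that
alternative style (not what this patch does):

	struct se_subsystem_dev;
	struct se_lun;
	struct se_lun_acl;

	extern void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
	extern void target_stat_setup_port_default_groups(struct se_lun *);
	extern void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
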
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index ff9ace01e27a..bf6aa8a9f1d8 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -227,8 +227,6 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
 static void transport_stop_all_task_timers(struct se_cmd *cmd);
 
-int transport_emulate_control_cdb(struct se_task *task);
-
 int init_se_global(void)
 {
 	struct se_global *global;
@@ -1622,7 +1620,7 @@ struct se_device *transport_add_device_to_core_hba(
 	const char *inquiry_prod,
 	const char *inquiry_rev)
 {
-	int ret = 0, force_pt;
+	int force_pt;
 	struct se_device  *dev;
 
 	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
@@ -1739,9 +1737,8 @@ struct se_device *transport_add_device_to_core_hba(
 	}
 	scsi_dump_inquiry(dev);
 
+	return dev;
 out:
-	if (!ret)
-		return dev;
 	kthread_stop(dev->process_thread);
 
 	spin_lock(&hba->device_lock);
@@ -4359,11 +4356,9 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
 			printk(KERN_ERR "Unable to allocate struct se_mem\n");
 			goto out;
 		}
-		INIT_LIST_HEAD(&se_mem->se_list);
-		se_mem->se_len = (length > dma_size) ? dma_size : length;
 
 /* #warning FIXME Allocate contiguous pages for struct se_mem elements */
-		se_mem->se_page = (struct page *) alloc_pages(GFP_KERNEL, 0);
+		se_mem->se_page = alloc_pages(GFP_KERNEL, 0);
 		if (!(se_mem->se_page)) {
 			printk(KERN_ERR "alloc_pages() failed\n");
 			goto out;
@@ -4374,6 +4369,8 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
 			printk(KERN_ERR "kmap_atomic() failed\n");
 			goto out;
 		}
+		INIT_LIST_HEAD(&se_mem->se_list);
+		se_mem->se_len = (length > dma_size) ? dma_size : length;
 		memset(buf, 0, se_mem->se_len);
 		kunmap_atomic(buf, KM_IRQ0);
 
@@ -4392,10 +4389,13 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
 
 	return 0;
 out:
+	if (se_mem) {
+		/* se_page may be NULL if alloc_pages() failed above */
+		if (se_mem->se_page)
+			__free_pages(se_mem->se_page, 0);
+		kmem_cache_free(se_mem_cache, se_mem);
+	}
 	return -1;
 }
 
-extern u32 transport_calc_sg_num(
+u32 transport_calc_sg_num(
 	struct se_task *task,
 	struct se_mem *in_se_mem,
 	u32 task_offset)
@@ -5834,31 +5834,26 @@ int transport_generic_do_tmr(struct se_cmd *cmd)
 	int ret;
 
 	switch (tmr->function) {
-	case ABORT_TASK:
+	case TMR_ABORT_TASK:
 		ref_cmd = tmr->ref_cmd;
 		tmr->response = TMR_FUNCTION_REJECTED;
 		break;
-	case ABORT_TASK_SET:
-	case CLEAR_ACA:
-	case CLEAR_TASK_SET:
+	case TMR_ABORT_TASK_SET:
+	case TMR_CLEAR_ACA:
+	case TMR_CLEAR_TASK_SET:
 		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
 		break;
-	case LUN_RESET:
+	case TMR_LUN_RESET:
 		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
 		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
 					 TMR_FUNCTION_REJECTED;
 		break;
-#if 0
-	case TARGET_WARM_RESET:
-		transport_generic_host_reset(dev->se_hba);
+	case TMR_TARGET_WARM_RESET:
 		tmr->response = TMR_FUNCTION_REJECTED;
 		break;
-	case TARGET_COLD_RESET:
-		transport_generic_host_reset(dev->se_hba);
-		transport_generic_cold_reset(dev->se_hba);
+	case TMR_TARGET_COLD_RESET:
 		tmr->response = TMR_FUNCTION_REJECTED;
 		break;
-#endif
 	default:
 		printk(KERN_ERR "Uknown TMR function: 0x%02x.\n",
 				tmr->function);
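
The transport_add_device_to_core_hba() hunks above replace a "ret flag
tested at the error label" construction with the usual kernel idiom of
returning the successful result before the unified cleanup label, so the
label only ever handles failures. A generic sketch of the idiom (struct foo
and example_init() are placeholders, not code from this patch):

	static struct foo *example_alloc(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return NULL;
		if (example_init(f) < 0)
			goto out_free;
		return f;	/* success exits before the cleanup label */
	out_free:
		kfree(f);
		return NULL;
	}
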
diff --git a/include/scsi/libiscsi_tcp.h b/include/scsi/libiscsi_tcp.h
index 741ae7ed4394..e6b9fd2eea34 100644
--- a/include/scsi/libiscsi_tcp.h
+++ b/include/scsi/libiscsi_tcp.h
@@ -47,6 +47,7 @@ struct iscsi_segment {
 	struct scatterlist	*sg;
 	void			*sg_mapped;
 	unsigned int		sg_offset;
+	bool			atomic_mapped;
 
 	iscsi_segment_done_fn_t	*done;
 };
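
The new atomic_mapped flag records which mapping primitive produced
sg_mapped so that teardown can invoke the matching unmap. Sketched from the
header alone (the real users live in libiscsi_tcp.c, not shown in this
excerpt), the expected pattern looks roughly like:

	static void example_segment_unmap(struct iscsi_segment *segment)
	{
		if (!segment->sg_mapped)
			return;
		if (segment->atomic_mapped)
			kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
		else
			kunmap(sg_page(segment->sg));
		segment->sg_mapped = NULL;
	}
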
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index f171c65dc5a8..2d3ec5094685 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -462,7 +462,7 @@ static inline int scsi_device_qas(struct scsi_device *sdev)
 }
 static inline int scsi_device_enclosure(struct scsi_device *sdev)
 {
-	return sdev->inquiry[6] & (1<<6);
+	return sdev->inquiry ? (sdev->inquiry[6] & (1<<6)) : 1;
 }
 
 static inline int scsi_device_protection(struct scsi_device *sdev)
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 0828b6c8610a..c15ed5026fb5 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -9,7 +9,7 @@
 #include <net/sock.h>
 #include <net/tcp.h>
 
-#define TARGET_CORE_MOD_VERSION		"v4.0.0-rc6"
+#define TARGET_CORE_MOD_VERSION		"v4.0.0-rc7-ml"
 #define SHUTDOWN_SIGS	(sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT))
 
 /* Used by transport_generic_allocate_iovecs() */
@@ -239,7 +239,7 @@ struct t10_alua_lu_gp {
 } ____cacheline_aligned;
 
 struct t10_alua_lu_gp_member {
-	int lu_gp_assoc:1;
+	bool lu_gp_assoc;
 	atomic_t lu_gp_mem_ref_cnt;
 	spinlock_t lu_gp_mem_lock;
 	struct t10_alua_lu_gp *lu_gp;
@@ -271,7 +271,7 @@ struct t10_alua_tg_pt_gp {
 } ____cacheline_aligned;
 
 struct t10_alua_tg_pt_gp_member {
-	int tg_pt_gp_assoc:1;
+	bool tg_pt_gp_assoc;
 	atomic_t tg_pt_gp_mem_ref_cnt;
 	spinlock_t tg_pt_gp_mem_lock;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
@@ -336,7 +336,7 @@ struct t10_pr_registration {
 	int pr_res_type;
 	int pr_res_scope;
 	/* Used for fabric initiator WWPNs using a ISID */
-	int isid_present_at_reg:1;
+	bool isid_present_at_reg;
 	u32 pr_res_mapped_lun;
 	u32 pr_aptpl_target_lun;
 	u32 pr_res_generation;
@@ -418,7 +418,7 @@ struct se_transport_task {
 	unsigned long long	t_task_lba;
 	int			t_tasks_failed;
 	int			t_tasks_fua;
-	int			t_tasks_bidi:1;
+	bool			t_tasks_bidi;
 	u32			t_task_cdbs;
 	u32			t_tasks_check;
 	u32			t_tasks_no;
@@ -470,7 +470,7 @@ struct se_task {
 	u8		task_flags;
 	int		task_error_status;
 	int		task_state_flags;
-	int		task_padded_sg:1;
+	bool		task_padded_sg;
 	unsigned long long	task_lba;
 	u32		task_no;
 	u32		task_sectors;
@@ -494,8 +494,8 @@ struct se_task {
 	struct list_head t_state_list;
 } ____cacheline_aligned;
 
-#define TASK_CMD(task)	((struct se_cmd *)task->task_se_cmd)
-#define TASK_DEV(task)	((struct se_device *)task->se_dev)
+#define TASK_CMD(task)	((task)->task_se_cmd)
+#define TASK_DEV(task)	((task)->se_dev)
 
 struct se_cmd {
 	/* SAM response code being sent to initiator */
@@ -551,8 +551,8 @@ struct se_cmd {
 	void (*transport_complete_callback)(struct se_cmd *);
 } ____cacheline_aligned;
 
-#define T_TASK(cmd)     ((struct se_transport_task *)(cmd->t_task))
-#define CMD_TFO(cmd) ((struct target_core_fabric_ops *)cmd->se_tfo)
+#define T_TASK(cmd)     ((cmd)->t_task)
+#define CMD_TFO(cmd)	((cmd)->se_tfo)
 
 struct se_tmr_req {
 	/* Task Management function to be performed */
@@ -583,7 +583,7 @@ struct se_ua {
 struct se_node_acl {
 	char			initiatorname[TRANSPORT_IQN_LEN];
 	/* Used to signal demo mode created ACL, disabled by default */
-	int			dynamic_node_acl:1;
+	bool			dynamic_node_acl;
 	u32			queue_depth;
 	u32			acl_index;
 	u64			num_cmds;
@@ -601,7 +601,8 @@ struct se_node_acl {
 	struct config_group	acl_attrib_group;
 	struct config_group	acl_auth_group;
 	struct config_group	acl_param_group;
-	struct config_group	*acl_default_groups[4];
+	struct config_group	acl_fabric_stat_group;
+	struct config_group	*acl_default_groups[5];
 	struct list_head	acl_list;
 	struct list_head	acl_sess_list;
 } ____cacheline_aligned;
@@ -615,13 +616,19 @@ struct se_session {
 	struct list_head	sess_acl_list;
 } ____cacheline_aligned;
 
-#define SE_SESS(cmd)		((struct se_session *)(cmd)->se_sess)
-#define SE_NODE_ACL(sess)	((struct se_node_acl *)(sess)->se_node_acl)
+#define SE_SESS(cmd)		((cmd)->se_sess)
+#define SE_NODE_ACL(sess)	((sess)->se_node_acl)
 
 struct se_device;
 struct se_transform_info;
 struct scatterlist;
 
+struct se_ml_stat_grps {
+	struct config_group	stat_group;
+	struct config_group	scsi_auth_intr_group;
+	struct config_group	scsi_att_intr_port_group;
+};
+
 struct se_lun_acl {
 	char			initiatorname[TRANSPORT_IQN_LEN];
 	u32			mapped_lun;
@@ -629,10 +636,13 @@ struct se_lun_acl {
 	struct se_lun		*se_lun;
 	struct list_head	lacl_list;
 	struct config_group	se_lun_group;
+	struct se_ml_stat_grps	ml_stat_grps;
 }  ____cacheline_aligned;
 
+#define ML_STAT_GRPS(lacl)	(&(lacl)->ml_stat_grps)
+
 struct se_dev_entry {
-	int			def_pr_registered:1;
+	bool			def_pr_registered;
 	/* See transport_lunflags_table */
 	u32			lun_flags;
 	u32			deve_cmds;
@@ -693,6 +703,13 @@ struct se_dev_attrib {
 	struct config_group da_group;
 } ____cacheline_aligned;
 
+struct se_dev_stat_grps {
+	struct config_group stat_group;
+	struct config_group scsi_dev_group;
+	struct config_group scsi_tgt_dev_group;
+	struct config_group scsi_lu_group;
+};
+
 struct se_subsystem_dev {
 /* Used for struct se_subsystem_dev-->se_dev_alias, must be less than PAGE_SIZE */
 #define SE_DEV_ALIAS_LEN		512
@@ -716,11 +733,14 @@ struct se_subsystem_dev {
 	struct config_group se_dev_group;
 	/* For T10 Reservations */
 	struct config_group se_dev_pr_group;
+	/* For target_core_stat.c groups */
+	struct se_dev_stat_grps dev_stat_grps;
 } ____cacheline_aligned;
 
 #define T10_ALUA(su_dev)	(&(su_dev)->t10_alua)
 #define T10_RES(su_dev)		(&(su_dev)->t10_reservation)
 #define T10_PR_OPS(su_dev)	(&(su_dev)->t10_reservation.pr_ops)
+#define DEV_STAT_GRP(dev)	(&(dev)->dev_stat_grps)
 
 struct se_device {
 	/* Set to 1 if thread is NOT sleeping on thread_sem */
@@ -803,8 +823,8 @@ struct se_device {
 	struct list_head	g_se_dev_list;
 }  ____cacheline_aligned;
 
-#define SE_DEV(cmd)		((struct se_device *)(cmd)->se_lun->lun_se_dev)
-#define SU_DEV(dev)		((struct se_subsystem_dev *)(dev)->se_sub_dev)
+#define SE_DEV(cmd)		((cmd)->se_lun->lun_se_dev)
+#define SU_DEV(dev)		((dev)->se_sub_dev)
 #define DEV_ATTRIB(dev)		(&(dev)->se_sub_dev->se_dev_attrib)
 #define DEV_T10_WWN(dev)	(&(dev)->se_sub_dev->t10_wwn)
 
@@ -832,7 +852,14 @@ struct se_hba {
 	struct se_subsystem_api *transport;
 }  ____cacheline_aligned;
 
-#define SE_HBA(d)		((struct se_hba *)(d)->se_hba)
+#define SE_HBA(dev)		((dev)->se_hba)
+
+struct se_port_stat_grps {
+	struct config_group stat_group;
+	struct config_group scsi_port_group;
+	struct config_group scsi_tgt_port_group;
+	struct config_group scsi_transport_group;
+};
 
 struct se_lun {
 	/* See transport_lun_status_table */
@@ -848,11 +875,13 @@ struct se_lun {
 	struct list_head	lun_cmd_list;
 	struct list_head	lun_acl_list;
 	struct se_device	*lun_se_dev;
+	struct se_port		*lun_sep;
 	struct config_group	lun_group;
-	struct se_port	*lun_sep;
+	struct se_port_stat_grps port_stat_grps;
 } ____cacheline_aligned;
 
-#define SE_LUN(c)		((struct se_lun *)(c)->se_lun)
+#define SE_LUN(cmd)		((cmd)->se_lun)
+#define PORT_STAT_GRP(lun)	(&(lun)->port_stat_grps)
 
 struct scsi_port_stats {
        u64     cmd_pdus;
@@ -919,11 +948,13 @@ struct se_portal_group {
 	struct config_group	tpg_param_group;
 } ____cacheline_aligned;
 
-#define TPG_TFO(se_tpg)	((struct target_core_fabric_ops *)(se_tpg)->se_tpg_tfo)
+#define TPG_TFO(se_tpg)	((se_tpg)->se_tpg_tfo)
 
 struct se_wwn {
 	struct target_fabric_configfs *wwn_tf;
 	struct config_group	wwn_group;
+	struct config_group	*wwn_default_groups[2];
+	struct config_group	fabric_stat_group;
 } ____cacheline_aligned;
 
 struct se_global {
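
The "int foo:1" to bool conversions throughout this header (and in
target_core_fabric_ops.h below) are more than cosmetic: with GCC a plain
int single-bit bit-field is signed, so it can only store 0 or -1, and a
test like "foo == 1" silently never matches. A minimal demonstration
(illustrative only; bool here is the kernel's <linux/types.h> bool):

	struct demo {
		int	old_flag:1;	/* signed: reads back as 0 or -1 */
		bool	new_flag;	/* reads back as 0 or 1, as expected */
	};

	static void example(struct demo *d)
	{
		d->old_flag = 1;
		d->new_flag = true;
		/* d->old_flag is now -1, so (d->old_flag == 1) is false,
		 * while (d->new_flag == true) behaves as intended. */
	}
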
diff --git a/include/target/target_core_configfs.h b/include/target/target_core_configfs.h
index 40e6e740527c..612509592ffd 100644
--- a/include/target/target_core_configfs.h
+++ b/include/target/target_core_configfs.h
@@ -14,10 +14,12 @@ extern void target_fabric_configfs_deregister(struct target_fabric_configfs *);
 struct target_fabric_configfs_template {
 	struct config_item_type tfc_discovery_cit;
 	struct config_item_type	tfc_wwn_cit;
+	struct config_item_type tfc_wwn_fabric_stats_cit;
 	struct config_item_type tfc_tpg_cit;
 	struct config_item_type tfc_tpg_base_cit;
 	struct config_item_type tfc_tpg_lun_cit;
 	struct config_item_type tfc_tpg_port_cit;
+	struct config_item_type tfc_tpg_port_stat_cit;
 	struct config_item_type tfc_tpg_np_cit;
 	struct config_item_type tfc_tpg_np_base_cit;
 	struct config_item_type tfc_tpg_attrib_cit;
@@ -27,7 +29,9 @@ struct target_fabric_configfs_template {
 	struct config_item_type tfc_tpg_nacl_attrib_cit;
 	struct config_item_type tfc_tpg_nacl_auth_cit;
 	struct config_item_type tfc_tpg_nacl_param_cit;
+	struct config_item_type tfc_tpg_nacl_stat_cit;
 	struct config_item_type tfc_tpg_mappedlun_cit;
+	struct config_item_type tfc_tpg_mappedlun_stat_cit;
 };
 
 struct target_fabric_configfs {
diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h
index f3ac12b019c2..5eb8b1ae59d1 100644
--- a/include/target/target_core_fabric_ops.h
+++ b/include/target/target_core_fabric_ops.h
@@ -8,7 +8,7 @@ struct target_core_fabric_ops {
 	 * for scatterlist chaining using transport_do_task_sg_link(),
 	 * disabled by default
 	 */
-	int task_sg_chaining:1;
+	bool task_sg_chaining;
 	char *(*get_fabric_name)(void);
 	u8 (*get_fabric_proto_ident)(struct se_portal_group *);
 	char *(*tpg_get_wwn)(struct se_portal_group *);
diff --git a/include/target/target_core_tmr.h b/include/target/target_core_tmr.h
index 6c8248bc2c66..bd5596807478 100644
--- a/include/target/target_core_tmr.h
+++ b/include/target/target_core_tmr.h
@@ -1,37 +1,29 @@
 #ifndef TARGET_CORE_TMR_H
 #define TARGET_CORE_TMR_H
 
-/* task management function values */
-#ifdef ABORT_TASK
-#undef ABORT_TASK
-#endif /* ABORT_TASK */
-#define ABORT_TASK				1
-#ifdef ABORT_TASK_SET
-#undef ABORT_TASK_SET
-#endif /* ABORT_TASK_SET */
-#define ABORT_TASK_SET				2
-#ifdef CLEAR_ACA
-#undef CLEAR_ACA
-#endif /* CLEAR_ACA */
-#define CLEAR_ACA				3
-#ifdef CLEAR_TASK_SET
-#undef CLEAR_TASK_SET
-#endif /* CLEAR_TASK_SET */
-#define CLEAR_TASK_SET				4
-#define LUN_RESET				5
-#define TARGET_WARM_RESET			6
-#define TARGET_COLD_RESET			7
-#define TASK_REASSIGN				8
+/* fabric independent task management function values */
+enum tcm_tmreq_table {
+	TMR_ABORT_TASK		= 1,
+	TMR_ABORT_TASK_SET	= 2,
+	TMR_CLEAR_ACA		= 3,
+	TMR_CLEAR_TASK_SET	= 4,
+	TMR_LUN_RESET		= 5,
+	TMR_TARGET_WARM_RESET	= 6,
+	TMR_TARGET_COLD_RESET	= 7,
+	TMR_FABRIC_TMR		= 255,
+};
 
-/* task management response values */
-#define TMR_FUNCTION_COMPLETE			0
-#define TMR_TASK_DOES_NOT_EXIST			1
-#define TMR_LUN_DOES_NOT_EXIST			2
-#define TMR_TASK_STILL_ALLEGIANT		3
-#define TMR_TASK_FAILOVER_NOT_SUPPORTED		4
-#define TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED	5
-#define TMR_FUNCTION_AUTHORIZATION_FAILED	6
-#define TMR_FUNCTION_REJECTED			255
+/* fabric independent task management response values */
+enum tcm_tmrsp_table {
+	TMR_FUNCTION_COMPLETE		= 0,
+	TMR_TASK_DOES_NOT_EXIST		= 1,
+	TMR_LUN_DOES_NOT_EXIST		= 2,
+	TMR_TASK_STILL_ALLEGIANT	= 3,
+	TMR_TASK_FAILOVER_NOT_SUPPORTED	= 4,
+	TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED	= 5,
+	TMR_FUNCTION_AUTHORIZATION_FAILED = 6,
+	TMR_FUNCTION_REJECTED		= 255,
+};
 
 extern struct kmem_cache *se_tmr_req_cache;
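
With the request codes gathered into enum tcm_tmreq_table, call sites such
as the transport_generic_do_tmr() switch above get symbolic names plus enum
type checking instead of colliding #defines. A minimal sketch of the
fabric-facing side (request allocation and dispatch are elided):

	static void example_set_lun_reset(struct se_tmr_req *tmr)
	{
		/* Formerly the bare constant 5 via "#define LUN_RESET 5". */
		tmr->function = TMR_LUN_RESET;
	}
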
 
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index 2e8ec51f0615..59aa464f6ee2 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -109,6 +109,8 @@
 struct se_mem;
 struct se_subsystem_api;
 
+extern struct kmem_cache *se_mem_cache;
+
 extern int init_se_global(void);
 extern void release_se_global(void);
 extern void init_scsi_index_table(void);
@@ -190,6 +192,8 @@ extern void transport_generic_process_write(struct se_cmd *);
 extern int transport_generic_do_tmr(struct se_cmd *);
 /* From target_core_alua.c */
 extern int core_alua_check_nonop_delay(struct se_cmd *);
+/* From target_core_cdb.c */
+extern int transport_emulate_control_cdb(struct se_task *);
 
 /*
  * Each se_transport_task_t can have N number of possible struct se_task's