SCSI misc on 20180131
commit 28bc6fb959

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "This is mostly updates of the usual driver suspects: arcmsr,
  scsi_debug, mpt3sas, lpfc, cxlflash, qla2xxx, aacraid, megaraid_sas,
  hisi_sas.

  We also have a rework of the libsas hotplug handling to make it more
  robust, a slew of 32 bit time conversions and fixes, and a host of
  the usual minor updates and style changes. The biggest potential for
  regressions is the libsas hotplug changes, but so far they seem
  stable under testing"

Signed-off-by: James E.J. Bottomley <jejb@linux.vnet.ibm.com>

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (313 commits)
  scsi: qla2xxx: Fix logo flag for qlt_free_session_done()
  scsi: arcmsr: avoid do_gettimeofday
  scsi: core: Add VENDOR_SPECIFIC sense code definitions
  scsi: qedi: Drop cqe response during connection recovery
  scsi: fas216: fix sense buffer initialization
  scsi: ibmvfc: Remove unneeded semicolons
  scsi: hisi_sas: fix a bug in hisi_sas_dev_gone()
  scsi: hisi_sas: directly attached disk LED feature for v2 hw
  scsi: hisi_sas: devicetree: bindings: add LED feature for v2 hw
  scsi: megaraid_sas: NVMe passthrough command support
  scsi: megaraid: use ktime_get_real for firmware time
  scsi: fnic: use 64-bit timestamps
  scsi: qedf: Fix error return code in __qedf_probe()
  scsi: devinfo: fix format of the device list
  scsi: qla2xxx: Update driver version to 10.00.00.05-k
  scsi: qla2xxx: Add XCB counters to debugfs
  scsi: qla2xxx: Fix queue ID for async abort with Multiqueue
  scsi: qla2xxx: Fix warning for code intentation in __qla24xx_handle_gpdb_event()
  scsi: qla2xxx: Fix warning during port_name debug print
  scsi: qla2xxx: Fix warning in qla2x00_async_iocb_timeout()
  ...
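Aside on the "32 bit time conversions" mentioned above: the recurring y2038 pattern
in the driver hunks below replaces a 32-bit struct timeval filled by
do_gettimeofday() with 64-bit seconds from ktime_get_real_seconds(). A minimal
sketch of that conversion, assuming a device that stores local time in a 32-bit
seconds field (the helper name is made up; the kernel APIs are real):

    #include <linux/ktime.h>
    #include <linux/time.h>        /* for sys_tz */

    static u32 example_device_local_time(void)
    {
        /* old, overflows in January 2038 on 32-bit:
         *     struct timeval tv;
         *     do_gettimeofday(&tv);
         *     return (u32)(tv.tv_sec - (sys_tz.tz_minuteswest * 60));
         */
        time64_t now = ktime_get_real_seconds();    /* 64-bit seconds */

        /* any remaining truncation is the device's own field width */
        return (u32)(now - (sys_tz.tz_minuteswest * 60));
    }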
--- a/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
+++ b/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
@@ -8,7 +8,10 @@ Main node required properties:
   (b) "hisilicon,hip06-sas-v2" for v2 hw in hip06 chipset
   (c) "hisilicon,hip07-sas-v2" for v2 hw in hip07 chipset
 - sas-addr : array of 8 bytes for host SAS address
-- reg : Address and length of the SAS register
+- reg : Contains two regions. The first is the address and length of the SAS
+  register. The second is the address and length of CPLD register for
+  SGPIO control. The second is optional, and should be set only when
+  we use a CPLD for directly attached disk LED control.
 - hisilicon,sas-syscon: phandle of syscon used for sas control
 - ctrl-reset-reg : offset to controller reset register in ctrl reg
 - ctrl-reset-sts-reg : offset to controller reset status register in ctrl reg
--- a/Documentation/driver-api/scsi.rst
+++ b/Documentation/driver-api/scsi.rst
@@ -224,6 +224,14 @@ mid to lowlevel SCSI driver interface
 .. kernel-doc:: drivers/scsi/hosts.c
    :export:
 
+drivers/scsi/scsi_common.c
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+general support functions
+
+.. kernel-doc:: drivers/scsi/scsi_common.c
+   :export:
+
 Transport classes
 -----------------
 
@@ -332,5 +340,5 @@ todo
 ~~~~
 
 Parallel (fast/wide/ultra) SCSI, USB, SATA, SAS, Fibre Channel,
-FireWire, ATAPI devices, Infiniband, I20, iSCSI, Parallel ports,
+FireWire, ATAPI devices, Infiniband, I2O, iSCSI, Parallel ports,
 netlink...
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -958,7 +958,7 @@ mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
 {
 	u32 mf_dma_addr;
 	int req_offset;
-	u16	 req_idx;	/* Request index */
+	u16	req_idx;	/* Request index */
 
 	/* ensure values are reset properly! */
 	mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;		/* byte */
@@ -994,7 +994,7 @@ mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
 {
 	u32 mf_dma_addr;
 	int req_offset;
-	u16	 req_idx;	/* Request index */
+	u16	req_idx;	/* Request index */
 
 	/* ensure values are reset properly! */
 	mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;
@@ -1128,11 +1128,12 @@ mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
 static void
 mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
 {
-	SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
-	pChain->Length = cpu_to_le16(length);
-	pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
-	pChain->NextChainOffset = next;
-	pChain->Address = cpu_to_le32(dma_addr);
+	SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
+
+	pChain->Length = cpu_to_le16(length);
+	pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
+	pChain->NextChainOffset = next;
+	pChain->Address = cpu_to_le32(dma_addr);
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1147,18 +1148,18 @@ mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
 static void
 mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
 {
-	SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
-	u32 tmp = dma_addr & 0xFFFFFFFF;
+	SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
+	u32 tmp = dma_addr & 0xFFFFFFFF;
 
-	pChain->Length = cpu_to_le16(length);
-	pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
-			 MPI_SGE_FLAGS_64_BIT_ADDRESSING);
+	pChain->Length = cpu_to_le16(length);
+	pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
+			 MPI_SGE_FLAGS_64_BIT_ADDRESSING);
 
-	pChain->NextChainOffset = next;
+	pChain->NextChainOffset = next;
 
-	pChain->Address.Low = cpu_to_le32(tmp);
-	tmp = (u32)(upper_32_bits(dma_addr));
-	pChain->Address.High = cpu_to_le32(tmp);
+	pChain->Address.Low = cpu_to_le32(tmp);
+	tmp = (u32)(upper_32_bits(dma_addr));
+	pChain->Address.High = cpu_to_le32(tmp);
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1360,7 +1361,7 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
 	ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
 	ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;
 
-return 0;
+	return 0;
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2152,7 +2153,7 @@ mpt_suspend(struct pci_dev *pdev, pm_message_t state)
 	    device_state);
 
 	/* put ioc into READY_STATE */
-	if(SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, CAN_SLEEP)) {
+	if (SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, CAN_SLEEP)) {
 		printk(MYIOC_s_ERR_FMT
 		"pci-suspend:  IOC msg unit reset failed!\n", ioc->name);
 	}
@@ -6348,7 +6349,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
 	u8	 page_type = 0, extend_page;
 	unsigned long	 timeleft;
 	unsigned long	 flags;
-    int		 in_isr;
+	int	 in_isr;
 	u8	 issue_hard_reset = 0;
 	u8	 retry_count = 0;
 
@@ -7697,7 +7698,7 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
 		break;
 	}
 	if (ds)
-		strncpy(evStr, ds, EVENT_DESCR_STR_SZ);
+		strlcpy(evStr, ds, EVENT_DESCR_STR_SZ);
 
 
 	devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
@@ -8092,15 +8093,15 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
 static void
 mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info, u8 cb_idx)
 {
-union loginfo_type {
-	u32	loginfo;
-	struct {
-		u32	subcode:16;
-		u32	code:8;
-		u32	originator:4;
-		u32	bus_type:4;
-	}dw;
-};
+	union loginfo_type {
+		u32	loginfo;
+		struct {
+			u32	subcode:16;
+			u32	code:8;
+			u32	originator:4;
+			u32	bus_type:4;
+		} dw;
+	};
 	union loginfo_type sas_loginfo;
 	char *originator_desc = NULL;
 	char *code_desc = NULL;
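The strncpy()-to-strlcpy() switch in mpt_display_event_info() above is worth
noting: strncpy() does not NUL-terminate the destination when the source is at
least as long as the buffer, while strlcpy() always terminates, truncating if
needed. A minimal illustrative sketch (hypothetical buffer and helper, not
driver code):

    #include <linux/string.h>

    static void demo_copy_desc(char *dst, size_t dst_sz, const char *desc)
    {
        /* risky: dst may be left without a trailing '\0' */
        strncpy(dst, desc, dst_sz);

        /* safe: always NUL-terminated, truncates long input */
        strlcpy(dst, desc, dst_sz);
    }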
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -2481,24 +2481,13 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
 	else
 		karg.host_no =  -1;
 
-	/* Reformat the fw_version into a string
-	 */
-	karg.fw_version[0] = ioc->facts.FWVersion.Struct.Major >= 10 ?
-		((ioc->facts.FWVersion.Struct.Major / 10) + '0') : '0';
-	karg.fw_version[1] = (ioc->facts.FWVersion.Struct.Major % 10 ) + '0';
-	karg.fw_version[2] = '.';
-	karg.fw_version[3] = ioc->facts.FWVersion.Struct.Minor >= 10 ?
-		((ioc->facts.FWVersion.Struct.Minor / 10) + '0') : '0';
-	karg.fw_version[4] = (ioc->facts.FWVersion.Struct.Minor % 10 ) + '0';
-	karg.fw_version[5] = '.';
-	karg.fw_version[6] = ioc->facts.FWVersion.Struct.Unit >= 10 ?
-		((ioc->facts.FWVersion.Struct.Unit / 10) + '0') : '0';
-	karg.fw_version[7] = (ioc->facts.FWVersion.Struct.Unit % 10 ) + '0';
-	karg.fw_version[8] = '.';
-	karg.fw_version[9] = ioc->facts.FWVersion.Struct.Dev >= 10 ?
-		((ioc->facts.FWVersion.Struct.Dev / 10) + '0') : '0';
-	karg.fw_version[10] = (ioc->facts.FWVersion.Struct.Dev % 10 ) + '0';
-	karg.fw_version[11] = '\0';
+	/* Reformat the fw_version into a string */
+	snprintf(karg.fw_version, sizeof(karg.fw_version),
+		 "%.2hhu.%.2hhu.%.2hhu.%.2hhu",
+		 ioc->facts.FWVersion.Struct.Major,
+		 ioc->facts.FWVersion.Struct.Minor,
+		 ioc->facts.FWVersion.Struct.Unit,
+		 ioc->facts.FWVersion.Struct.Dev);
 
 	/* Issue a config request to get the device serial number
 	 */
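The replacement above collapses eighteen lines of manual digit arithmetic into
one snprintf(); the "%.2hhu" conversion prints an unsigned char zero-padded to
at least two digits, and snprintf() guarantees termination within the buffer.
A standalone userspace sketch of the same formatting (values are made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned char major = 1, minor = 5, unit = 2, dev = 37;
        char fw_version[12];

        snprintf(fw_version, sizeof(fw_version),
                 "%.2hhu.%.2hhu.%.2hhu.%.2hhu", major, minor, unit, dev);
        printf("%s\n", fw_version);    /* prints 01.05.02.37 */
        return 0;
    }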
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1165,7 +1165,6 @@ mptsas_schedule_target_reset(void *iocp)
 	 * issue target reset to next device in the queue
 	 */
-
 	head = &hd->target_reset_list;
 	if (list_empty(head))
 		return;
 
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -369,7 +369,6 @@ out:
 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
 {
 	u32 local_time;
-	struct timeval time;
 	TW_Event *event;
 	unsigned short aen;
 	char host[16];
@@ -392,8 +391,8 @@ static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
 	memset(event, 0, sizeof(TW_Event));
 
 	event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
-	do_gettimeofday(&time);
-	local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
+	/* event->time_stamp_sec overflows in y2106 */
+	local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
 	event->time_stamp_sec = local_time;
 	event->aen_code = aen;
 	event->retrieved = TW_AEN_NOT_RETRIEVED;
@@ -473,11 +472,10 @@ out:
 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
 {
 	u32 schedulertime;
-	struct timeval utc;
 	TW_Command_Full *full_command_packet;
 	TW_Command *command_packet;
 	TW_Param_Apache *param;
-	u32 local_time;
+	time64_t local_time;
 
 	/* Fill out the command packet */
 	full_command_packet = tw_dev->command_packet_virt[request_id];
@@ -499,9 +497,8 @@ static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
 
 	/* Convert system time in UTC to local time seconds since last
 	   Sunday 12:00AM */
-	do_gettimeofday(&utc);
-	local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
-	schedulertime = local_time - (3 * 86400);
+	local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
+	div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
 	schedulertime = cpu_to_le32(schedulertime % 604800);
 
 	memcpy(param->data, &schedulertime, sizeof(u32));
@@ -648,8 +645,7 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	TW_Command_Full *full_command_packet;
 	TW_Compatibility_Info *tw_compat_info;
 	TW_Event *event;
-	struct timeval current_time;
-	u32 current_time_ms;
+	ktime_t current_time;
 	TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
 	int retval = TW_IOCTL_ERROR_OS_EFAULT;
 	void __user *argp = (void __user *)arg;
@@ -840,17 +836,17 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		break;
 	case TW_IOCTL_GET_LOCK:
 		tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
-		do_gettimeofday(&current_time);
-		current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);
+		current_time = ktime_get();
 
-		if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
+		if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) ||
+		    ktime_after(current_time, tw_dev->ioctl_time)) {
 			tw_dev->ioctl_sem_lock = 1;
-			tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
+			tw_dev->ioctl_time = ktime_add_ms(current_time, tw_lock->timeout_msec);
 			tw_ioctl->driver_command.status = 0;
 			tw_lock->time_remaining_msec = tw_lock->timeout_msec;
 		} else {
 			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
-			tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
+			tw_lock->time_remaining_msec = ktime_ms_delta(tw_dev->ioctl_time, current_time);
 		}
 		break;
 	case TW_IOCTL_RELEASE_LOCK:
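The TW_IOCTL_GET_LOCK conversion above swaps a hand-rolled millisecond counter
for the kernel's monotonic ktime_t helpers, which are immune to wall-clock
jumps. A minimal sketch of the same expiry pattern (the lock structure and
helper are hypothetical; the ktime APIs are real):

    #include <linux/ktime.h>

    struct demo_lock {
        ktime_t expires;
    };

    static int demo_try_lock(struct demo_lock *l, unsigned int timeout_msec)
    {
        ktime_t now = ktime_get();    /* monotonic clock */

        if (ktime_after(now, l->expires)) {
            l->expires = ktime_add_ms(now, timeout_msec);
            return 1;    /* acquired */
        }
        /* still held; ktime_ms_delta(l->expires, now) msec remain */
        return 0;
    }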
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -666,7 +666,7 @@ typedef struct TAG_TW_Device_Extension {
 	unsigned char		event_queue_wrapped;
 	unsigned int		error_sequence_id;
 	int			ioctl_sem_lock;
-	u32			ioctl_msec;
+	ktime_t			ioctl_time;
 	int			chrdev_request_id;
 	wait_queue_head_t	ioctl_wqueue;
 	struct mutex		ioctl_lock;
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -221,7 +221,6 @@ out:
 static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
 {
 	u32 local_time;
-	struct timeval time;
 	TW_Event *event;
 	unsigned short aen;
 	char host[16];
@@ -240,8 +239,8 @@ static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
 	memset(event, 0, sizeof(TW_Event));
 
 	event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
-	do_gettimeofday(&time);
-	local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
+	/* event->time_stamp_sec overflows in y2106 */
+	local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
 	event->time_stamp_sec = local_time;
 	event->aen_code = aen;
 	event->retrieved = TW_AEN_NOT_RETRIEVED;
@@ -408,11 +407,10 @@ out:
 static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
 {
 	u32 schedulertime;
-	struct timeval utc;
 	TW_Command_Full *full_command_packet;
 	TW_Command *command_packet;
 	TW_Param_Apache *param;
-	u32 local_time;
+	time64_t local_time;
 
 	/* Fill out the command packet */
 	full_command_packet = tw_dev->command_packet_virt[request_id];
@@ -434,10 +432,9 @@ static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
 
 	/* Convert system time in UTC to local time seconds since last
 	   Sunday 12:00AM */
-	do_gettimeofday(&utc);
-	local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
-	schedulertime = local_time - (3 * 86400);
-	schedulertime = cpu_to_le32(schedulertime % 604800);
+	local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
+	div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
+	schedulertime = cpu_to_le32(schedulertime);
 
 	memcpy(param->data, &schedulertime, sizeof(u32));
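Both 3ware drivers compute "local seconds since last Sunday midnight" for the
controller's scheduler. Once the time value is a 64-bit time64_t, the
open-coded modulo becomes div_u64_rem(), which is correct and cheap for 64-bit
dividends on 32-bit machines. Illustrative sketch (helper name hypothetical):

    #include <linux/ktime.h>
    #include <linux/math64.h>

    #define SECS_PER_DAY	86400
    #define SECS_PER_WEEK	604800

    static u32 demo_seconds_since_sunday(int tz_minuteswest)
    {
        time64_t local = ktime_get_real_seconds() - (tz_minuteswest * 60);
        u32 rem;

        /* the epoch (1970-01-01) was a Thursday, so shift back 3 days
         * to align the week boundary to Sunday 12:00AM */
        div_u64_rem(local - (3 * SECS_PER_DAY), SECS_PER_WEEK, &rem);
        return rem;
    }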
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -42,6 +42,8 @@
 #include <linux/highmem.h>	/* For flush_kernel_dcache_page */
 #include <linux/module.h>
 
+#include <asm/unaligned.h>
+
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -913,8 +915,15 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
 	memset(str, ' ', sizeof(*str));
 
 	if (sup_adap_info->adapter_type_text[0]) {
-		char *cp = sup_adap_info->adapter_type_text;
 		int c;
+		char *cp;
+		char *cname = kmemdup(sup_adap_info->adapter_type_text,
+				sizeof(sup_adap_info->adapter_type_text),
+				GFP_ATOMIC);
+		if (!cname)
+			return;
+
+		cp = cname;
 		if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
 			inqstrcpy("SMC", str->vid);
 		else {
@@ -923,7 +932,7 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
 				++cp;
 			c = *cp;
 			*cp = '\0';
-			inqstrcpy(sup_adap_info->adapter_type_text, str->vid);
+			inqstrcpy(cname, str->vid);
 			*cp = c;
 			while (*cp && *cp != ' ')
 				++cp;
@@ -931,14 +940,11 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
 		while (*cp == ' ')
 			++cp;
 		/* last six chars reserved for vol type */
-		c = 0;
-		if (strlen(cp) > sizeof(str->pid)) {
-			c = cp[sizeof(str->pid)];
+		if (strlen(cp) > sizeof(str->pid))
 			cp[sizeof(str->pid)] = '\0';
-		}
 		inqstrcpy (cp, str->pid);
-		if (c)
-			cp[sizeof(str->pid)] = c;
+
+		kfree(cname);
 	} else {
 		struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);
 
@@ -1660,87 +1666,309 @@ static int aac_adapter_hba(struct fib *fib, struct scsi_cmnd *cmd)
 			(void *) cmd);
 }
 
-int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target)
+static int aac_send_safw_bmic_cmd(struct aac_dev *dev,
+	struct aac_srb_unit *srbu, void *xfer_buf, int xfer_len)
 {
-	struct fib *fibptr;
-	struct aac_srb *srbcmd;
-	struct sgmap64 *sg64;
-	struct aac_ciss_identify_pd *identify_resp;
-	dma_addr_t addr;
-	u32 vbus, vid;
-	u16 fibsize, datasize;
-	int rcode = -ENOMEM;
+	struct fib	*fibptr;
+	dma_addr_t	addr;
+	int		rcode;
+	int		fibsize;
+	struct aac_srb	*srb;
+	struct aac_srb_reply *srb_reply;
+	struct sgmap64	*sg64;
+	u32 vbus;
+	u32 vid;
 
 	if (!dev->sa_firmware)
 		return 0;
 
 	/* allocate FIB */
 	fibptr = aac_fib_alloc(dev);
 	if (!fibptr)
-		goto out;
-
-	fibsize = sizeof(struct aac_srb) -
-		sizeof(struct sgentry) + sizeof(struct sgentry64);
-	datasize = sizeof(struct aac_ciss_identify_pd);
-
-	identify_resp = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
-					   GFP_KERNEL);
-	if (!identify_resp)
-		goto fib_free_ptr;
-
-	vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
-	vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);
+		return -ENOMEM;
 
 	aac_fib_init(fibptr);
+	fibptr->hw_fib_va->header.XferState &=
+		~cpu_to_le32(FastResponseCapable);
 
-	srbcmd = (struct aac_srb *) fib_data(fibptr);
-	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
-	srbcmd->channel  = cpu_to_le32(vbus);
-	srbcmd->id       = cpu_to_le32(vid);
-	srbcmd->lun      = 0;
-	srbcmd->flags    = cpu_to_le32(SRB_DataIn);
-	srbcmd->timeout  = cpu_to_le32(10);
-	srbcmd->retry_limit = 0;
-	srbcmd->cdb_size = cpu_to_le32(12);
-	srbcmd->count = cpu_to_le32(datasize);
+	fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
+						sizeof(struct sgentry64);
 
-	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
-	srbcmd->cdb[0] = 0x26;
-	srbcmd->cdb[2] = (u8)((AAC_MAX_LUN + target) & 0x00FF);
-	srbcmd->cdb[6] = CISS_IDENTIFY_PHYSICAL_DEVICE;
+	/* allocate DMA buffer for response */
+	addr = dma_map_single(&dev->pdev->dev, xfer_buf, xfer_len,
+							DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(&dev->pdev->dev, addr)) {
+		rcode = -ENOMEM;
+		goto fib_error;
+	}
 
-	sg64 = (struct sgmap64 *)&srbcmd->sg;
-	sg64->count = cpu_to_le32(1);
-	sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
-	sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
-	sg64->sg[0].count = cpu_to_le32(datasize);
+	srb = fib_data(fibptr);
+	memcpy(srb, &srbu->srb, sizeof(struct aac_srb));
 
-	rcode = aac_fib_send(ScsiPortCommand64,
-		fibptr, fibsize, FsaNormal, 1, 1, NULL, NULL);
+	vbus = (u32)le16_to_cpu(
+			dev->supplement_adapter_info.virt_device_bus);
+	vid = (u32)le16_to_cpu(
+			dev->supplement_adapter_info.virt_device_target);
 
+	/* set the common request fields */
+	srb->channel		= cpu_to_le32(vbus);
+	srb->id			= cpu_to_le32(vid);
+	srb->lun		= 0;
+	srb->function		= cpu_to_le32(SRBF_ExecuteScsi);
+	srb->timeout		= 0;
+	srb->retry_limit	= 0;
+	srb->cdb_size		= cpu_to_le32(16);
+	srb->count		= cpu_to_le32(xfer_len);
+
+	sg64 = (struct sgmap64 *)&srb->sg;
+	sg64->count		= cpu_to_le32(1);
+	sg64->sg[0].addr[1]	= cpu_to_le32(upper_32_bits(addr));
+	sg64->sg[0].addr[0]	= cpu_to_le32(lower_32_bits(addr));
+	sg64->sg[0].count	= cpu_to_le32(xfer_len);
+
+	/*
+	 * Copy the updated data for other dumping or other usage if needed
+	 */
+	memcpy(&srbu->srb, srb, sizeof(struct aac_srb));
+
+	/* issue request to the controller */
+	rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize, FsaNormal,
+					1, 1, NULL, NULL);
+
+	if (rcode == -ERESTARTSYS)
+		rcode = -ERESTART;
+
+	if (unlikely(rcode < 0))
+		goto bmic_error;
+
+	srb_reply = (struct aac_srb_reply *)fib_data(fibptr);
+	memcpy(&srbu->srb_reply, srb_reply, sizeof(struct aac_srb_reply));
+
+bmic_error:
+	dma_unmap_single(&dev->pdev->dev, addr, xfer_len, DMA_BIDIRECTIONAL);
+fib_error:
+	aac_fib_complete(fibptr);
+	aac_fib_free(fibptr);
+	return rcode;
+}
+
+static void aac_set_safw_target_qd(struct aac_dev *dev, int bus, int target)
+{
+
+	struct aac_ciss_identify_pd *identify_resp;
+
+	if (dev->hba_map[bus][target].devtype != AAC_DEVTYPE_NATIVE_RAW)
+		return;
+
+	identify_resp = dev->hba_map[bus][target].safw_identify_resp;
+	if (identify_resp == NULL) {
+		dev->hba_map[bus][target].qd_limit = 32;
+		return;
+	}
+
 	if (identify_resp->current_queue_depth_limit <= 0 ||
-		identify_resp->current_queue_depth_limit > 32)
+		identify_resp->current_queue_depth_limit > 255)
 		dev->hba_map[bus][target].qd_limit = 32;
 	else
 		dev->hba_map[bus][target].qd_limit =
 			identify_resp->current_queue_depth_limit;
+}
 
-	dma_free_coherent(&dev->pdev->dev, datasize, identify_resp, addr);
+static int aac_issue_safw_bmic_identify(struct aac_dev *dev,
+	struct aac_ciss_identify_pd **identify_resp, u32 bus, u32 target)
+{
+	int rcode = -ENOMEM;
+	int datasize;
+	struct aac_srb_unit srbu;
+	struct aac_srb *srbcmd;
+	struct aac_ciss_identify_pd *identify_reply;
 
-	aac_fib_complete(fibptr);
+	datasize = sizeof(struct aac_ciss_identify_pd);
+	identify_reply = kmalloc(datasize, GFP_KERNEL);
+	if (!identify_reply)
+		goto out;
 
-fib_free_ptr:
-	aac_fib_free(fibptr);
+	memset(&srbu, 0, sizeof(struct aac_srb_unit));
+
+	srbcmd = &srbu.srb;
+	srbcmd->flags	= cpu_to_le32(SRB_DataIn);
+	srbcmd->cdb[0]	= 0x26;
+	srbcmd->cdb[2]	= (u8)((AAC_MAX_LUN + target) & 0x00FF);
+	srbcmd->cdb[6]	= CISS_IDENTIFY_PHYSICAL_DEVICE;
+
+	rcode = aac_send_safw_bmic_cmd(dev, &srbu, identify_reply, datasize);
+	if (unlikely(rcode < 0))
+		goto mem_free_all;
+
+	*identify_resp = identify_reply;
+
 out:
 	return rcode;
+mem_free_all:
+	kfree(identify_reply);
+	goto out;
+}
+
+static inline void aac_free_safw_ciss_luns(struct aac_dev *dev)
+{
+	kfree(dev->safw_phys_luns);
+	dev->safw_phys_luns = NULL;
 }
 
 /**
- *	aac_update hba_map()-	update current hba map with data from FW
+ *	aac_get_safw_ciss_luns()	Process topology change
  *	@dev:	aac_dev structure
  *
  *	Execute a CISS REPORT PHYS LUNS and process the results into
  *	the current hba_map.
  */
+static int aac_get_safw_ciss_luns(struct aac_dev *dev)
+{
+	int rcode = -ENOMEM;
+	int datasize;
+	struct aac_srb *srbcmd;
+	struct aac_srb_unit srbu;
+	struct aac_ciss_phys_luns_resp *phys_luns;
+
+	datasize = sizeof(struct aac_ciss_phys_luns_resp) +
+		(AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
+	phys_luns = kmalloc(datasize, GFP_KERNEL);
+	if (phys_luns == NULL)
+		goto out;
+
+	memset(&srbu, 0, sizeof(struct aac_srb_unit));
+
+	srbcmd = &srbu.srb;
+	srbcmd->flags	= cpu_to_le32(SRB_DataIn);
+	srbcmd->cdb[0]	= CISS_REPORT_PHYSICAL_LUNS;
+	srbcmd->cdb[1]	= 2;	/* extended reporting */
+	srbcmd->cdb[8]	= (u8)(datasize >> 8);
+	srbcmd->cdb[9]	= (u8)(datasize);
+
+	rcode = aac_send_safw_bmic_cmd(dev, &srbu, phys_luns, datasize);
+	if (unlikely(rcode < 0))
+		goto mem_free_all;
+
+	if (phys_luns->resp_flag != 2) {
+		rcode = -ENOMSG;
+		goto mem_free_all;
+	}
+
+	dev->safw_phys_luns = phys_luns;
+
+out:
+	return rcode;
+mem_free_all:
+	kfree(phys_luns);
+	goto out;
+}
+
+static inline u32 aac_get_safw_phys_lun_count(struct aac_dev *dev)
+{
+	return get_unaligned_be32(&dev->safw_phys_luns->list_length[0])/24;
+}
+
+static inline u32 aac_get_safw_phys_bus(struct aac_dev *dev, int lun)
+{
+	return dev->safw_phys_luns->lun[lun].level2[1] & 0x3f;
+}
+
+static inline u32 aac_get_safw_phys_target(struct aac_dev *dev, int lun)
+{
+	return dev->safw_phys_luns->lun[lun].level2[0];
+}
+
+static inline u32 aac_get_safw_phys_expose_flag(struct aac_dev *dev, int lun)
+{
+	return dev->safw_phys_luns->lun[lun].bus >> 6;
+}
+
+static inline u32 aac_get_safw_phys_attribs(struct aac_dev *dev, int lun)
+{
+	return dev->safw_phys_luns->lun[lun].node_ident[9];
+}
+
+static inline u32 aac_get_safw_phys_nexus(struct aac_dev *dev, int lun)
+{
+	return *((u32 *)&dev->safw_phys_luns->lun[lun].node_ident[12]);
+}
+
+static inline u32 aac_get_safw_phys_device_type(struct aac_dev *dev, int lun)
+{
+	return dev->safw_phys_luns->lun[lun].node_ident[8];
+}
+
+static inline void aac_free_safw_identify_resp(struct aac_dev *dev,
+						int bus, int target)
+{
+	kfree(dev->hba_map[bus][target].safw_identify_resp);
+	dev->hba_map[bus][target].safw_identify_resp = NULL;
+}
+
+static inline void aac_free_safw_all_identify_resp(struct aac_dev *dev,
+	int lun_count)
+{
+	int luns;
+	int i;
+	u32 bus;
+	u32 target;
+
+	luns = aac_get_safw_phys_lun_count(dev);
+
+	if (luns < lun_count)
+		lun_count = luns;
+	else if (lun_count < 0)
+		lun_count = luns;
+
+	for (i = 0; i < lun_count; i++) {
+		bus = aac_get_safw_phys_bus(dev, i);
+		target = aac_get_safw_phys_target(dev, i);
+
+		aac_free_safw_identify_resp(dev, bus, target);
+	}
+}
+
+static int aac_get_safw_attr_all_targets(struct aac_dev *dev)
+{
+	int i;
+	int rcode = 0;
+	u32 lun_count;
+	u32 bus;
+	u32 target;
+	struct aac_ciss_identify_pd *identify_resp = NULL;
+
+	lun_count = aac_get_safw_phys_lun_count(dev);
+
+	for (i = 0; i < lun_count; ++i) {
+
+		bus = aac_get_safw_phys_bus(dev, i);
+		target = aac_get_safw_phys_target(dev, i);
+
+		rcode = aac_issue_safw_bmic_identify(dev,
+						&identify_resp, bus, target);
+
+		if (unlikely(rcode < 0))
+			goto free_identify_resp;
+
+		dev->hba_map[bus][target].safw_identify_resp = identify_resp;
+	}
+
+out:
+	return rcode;
+free_identify_resp:
+	aac_free_safw_all_identify_resp(dev, i);
+	goto out;
+}
+
+/**
+ *	aac_set_safw_attr_all_targets-	update current hba map with data from FW
+ *	@dev:	aac_dev structure
+- *	@phys_luns: FW information from report phys luns
+- *	@rescan: Indicates scan type
+ *
+ *	Update our hba map with the information gathered from the FW
+ */
-void aac_update_hba_map(struct aac_dev *dev,
-		struct aac_ciss_phys_luns_resp *phys_luns, int rescan)
+static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
 {
 	/* ok and extended reporting */
 	u32 lun_count, nexus;
@@ -1748,24 +1976,21 @@ void aac_update_hba_map(struct aac_dev *dev,
 	u8 expose_flag, attribs;
 	u8 devtype;
 
-	lun_count = ((phys_luns->list_length[0] << 24)
-		+ (phys_luns->list_length[1] << 16)
-		+ (phys_luns->list_length[2] << 8)
-		+ (phys_luns->list_length[3])) / 24;
+	lun_count = aac_get_safw_phys_lun_count(dev);
+
+	dev->scan_counter++;
 
 	for (i = 0; i < lun_count; ++i) {
 
-		bus = phys_luns->lun[i].level2[1] & 0x3f;
-		target = phys_luns->lun[i].level2[0];
-		expose_flag = phys_luns->lun[i].bus >> 6;
-		attribs = phys_luns->lun[i].node_ident[9];
-		nexus = *((u32 *) &phys_luns->lun[i].node_ident[12]);
+		bus = aac_get_safw_phys_bus(dev, i);
+		target = aac_get_safw_phys_target(dev, i);
+		expose_flag = aac_get_safw_phys_expose_flag(dev, i);
+		attribs = aac_get_safw_phys_attribs(dev, i);
+		nexus = aac_get_safw_phys_nexus(dev, i);
 
 		if (bus >= AAC_MAX_BUSES || target >= AAC_MAX_TARGETS)
 			continue;
 
-		dev->hba_map[bus][target].expose = expose_flag;
-
 		if (expose_flag != 0) {
 			devtype = AAC_DEVTYPE_RAID_MEMBER;
 			goto update_devtype;
@@ -1778,95 +2003,45 @@ void aac_update_hba_map(struct aac_dev *dev,
 		} else
 			devtype = AAC_DEVTYPE_ARC_RAW;
 
-		if (devtype != AAC_DEVTYPE_NATIVE_RAW)
-			goto update_devtype;
+		dev->hba_map[bus][target].scan_counter = dev->scan_counter;
 
-		if (aac_issue_bmic_identify(dev, bus, target) < 0)
-			dev->hba_map[bus][target].qd_limit = 32;
+		aac_set_safw_target_qd(dev, bus, target);
 
 update_devtype:
-		if (rescan == AAC_INIT)
-			dev->hba_map[bus][target].devtype = devtype;
-		else
-			dev->hba_map[bus][target].new_devtype = devtype;
+		dev->hba_map[bus][target].devtype = devtype;
 	}
 }
 
-/**
- *	aac_report_phys_luns()	Process topology change
- *	@dev:	aac_dev structure
- *	@fibptr: fib pointer
- *
- *	Execute a CISS REPORT PHYS LUNS and process the results into
- *	the current hba_map.
- */
-int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan)
+static int aac_setup_safw_targets(struct aac_dev *dev)
 {
-	int fibsize, datasize;
-	struct aac_ciss_phys_luns_resp *phys_luns;
-	struct aac_srb *srbcmd;
-	struct sgmap64 *sg64;
-	dma_addr_t addr;
-	u32 vbus, vid;
 	int rcode = 0;
 
-	/* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
-	fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry)
-		+ sizeof(struct sgentry64);
-	datasize = sizeof(struct aac_ciss_phys_luns_resp)
-		+ (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
+	rcode = aac_get_containers(dev);
+	if (unlikely(rcode < 0))
+		goto out;
 
-	phys_luns = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
-				       GFP_KERNEL);
-	if (phys_luns == NULL) {
-		rcode = -ENOMEM;
-		goto err_out;
-	}
+	rcode = aac_get_safw_ciss_luns(dev);
+	if (unlikely(rcode < 0))
+		goto out;
 
-	vbus = (u32) le16_to_cpu(
-			dev->supplement_adapter_info.virt_device_bus);
-	vid = (u32) le16_to_cpu(
-			dev->supplement_adapter_info.virt_device_target);
+	rcode = aac_get_safw_attr_all_targets(dev);
+	if (unlikely(rcode < 0))
+		goto free_ciss_luns;
 
-	aac_fib_init(fibptr);
+	aac_set_safw_attr_all_targets(dev);
 
-	srbcmd = (struct aac_srb *) fib_data(fibptr);
-	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
-	srbcmd->channel = cpu_to_le32(vbus);
-	srbcmd->id = cpu_to_le32(vid);
-	srbcmd->lun = 0;
-	srbcmd->flags = cpu_to_le32(SRB_DataIn);
-	srbcmd->timeout = cpu_to_le32(10);
-	srbcmd->retry_limit = 0;
-	srbcmd->cdb_size = cpu_to_le32(12);
-	srbcmd->count = cpu_to_le32(datasize);
-
-	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
-	srbcmd->cdb[0] = CISS_REPORT_PHYSICAL_LUNS;
-	srbcmd->cdb[1] = 2;	/* extended reporting */
-	srbcmd->cdb[8] = (u8)(datasize >> 8);
-	srbcmd->cdb[9] = (u8)(datasize);
-
-	sg64 = (struct sgmap64 *) &srbcmd->sg;
-	sg64->count = cpu_to_le32(1);
-	sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr));
-	sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr));
-	sg64->sg[0].count = cpu_to_le32(datasize);
-
-	rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize,
-			FsaNormal, 1, 1, NULL, NULL);
-
-	/* analyse data */
-	if (rcode >= 0 && phys_luns->resp_flag == 2) {
-		/* ok and extended reporting */
-		aac_update_hba_map(dev, phys_luns, rescan);
-	}
-
-	dma_free_coherent(&dev->pdev->dev, datasize, phys_luns, addr);
-err_out:
+	aac_free_safw_all_identify_resp(dev, -1);
+free_ciss_luns:
+	aac_free_safw_ciss_luns(dev);
+out:
 	return rcode;
 }
 
+int aac_setup_safw_adapter(struct aac_dev *dev)
+{
+	return aac_setup_safw_targets(dev);
+}
+
 int aac_get_adapter_info(struct aac_dev* dev)
 {
 	struct fib* fibptr;
@@ -1969,12 +2144,6 @@ int aac_get_adapter_info(struct aac_dev* dev)
 		dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
 	}
 
-	if (!dev->sync_mode && dev->sa_firmware &&
-		dev->supplement_adapter_info.virt_device_bus != 0xffff) {
-		/* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
-		rcode = aac_report_phys_luns(dev, fibptr, AAC_INIT);
-	}
-
 	if (!dev->in_reset) {
 		char buffer[16];
 		tmp = le32_to_cpu(dev->adapter_info.kernelrev);
@@ -2739,14 +2908,6 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 		}
 	} else {  /* check for physical non-dasd devices */
 		bus = aac_logical_to_phys(scmd_channel(scsicmd));
-		if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
-			(dev->hba_map[bus][cid].expose
-				== AAC_HIDE_DISK)){
-			if (scsicmd->cmnd[0] == INQUIRY) {
-				scsicmd->result = DID_NO_CONNECT << 16;
-				goto scsi_done_ret;
-			}
-		}
 
 		if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
 			dev->hba_map[bus][cid].devtype
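The new aac_send_safw_bmic_cmd() above maps the caller's buffer with
dma_map_single() instead of allocating a coherent buffer per request. The
essential pattern, sketched independently of the driver (device, buffer and
helper name are placeholders):

    #include <linux/dma-mapping.h>

    static int demo_dma_roundtrip(struct device *dev, void *buf, size_t len)
    {
        dma_addr_t addr;

        addr = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, addr))    /* mapping can fail; must check */
            return -ENOMEM;

        /* ... hand 'addr' to the hardware and wait for completion ... */

        dma_unmap_single(dev, addr, len, DMA_BIDIRECTIONAL);
        return 0;
    }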
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -41,6 +41,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/pci.h>
+#include <scsi/scsi_host.h>
 
 /*------------------------------------------------------------------------------
  *              D E F I N E S
@@ -97,7 +98,7 @@ enum {
 #define PMC_GLOBAL_INT_BIT0		0x00000001
 
 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 50834
+# define AAC_DRIVER_BUILD 50877
 # define AAC_DRIVER_BRANCH "-custom"
 #endif
 #define MAXIMUM_NUM_CONTAINERS	32
@@ -117,9 +118,13 @@ enum {
 /* Thor: 5 phys. buses: #0: empty, 1-4: 256 targets each */
 #define AAC_MAX_BUSES			5
 #define AAC_MAX_TARGETS		256
+#define AAC_BUS_TARGET_LOOP		(AAC_MAX_BUSES * AAC_MAX_TARGETS)
 #define AAC_MAX_NATIVE_SIZE		2048
 #define FW_ERROR_BUFFER_SIZE		512
 
+#define get_bus_number(x)	(x/AAC_MAX_TARGETS)
+#define get_target_number(x)	(x%AAC_MAX_TARGETS)
+
 /* Thor AIF events */
 #define SA_AIF_HOTPLUG			(1<<1)
 #define SA_AIF_HARDWARE		(1<<2)
@@ -1334,17 +1339,17 @@ struct fib {
 #define AAC_DEVTYPE_RAID_MEMBER	1
 #define AAC_DEVTYPE_ARC_RAW		2
 #define AAC_DEVTYPE_NATIVE_RAW		3
-#define AAC_EXPOSE_DISK		0
-#define AAC_HIDE_DISK		3
+
+#define AAC_SAFW_RESCAN_DELAY		(10 * HZ)
 
 struct aac_hba_map_info {
 	__le32	rmw_nexus;		/* nexus for native HBA devices */
 	u8	devtype;	/* device type */
-	u8	new_devtype;
 	u8	reset_state;	/* 0 - no reset, 1..x - */
 				/* after xth TM LUN reset */
 	u16	qd_limit;
-	u8	expose;		/*checks if to expose or not*/
+	u32	scan_counter;
+	struct aac_ciss_identify_pd  *safw_identify_resp;
 };
 
 /*
@@ -1560,6 +1565,7 @@ struct aac_dev
 	spinlock_t		fib_lock;
 
 	struct mutex		ioctl_mutex;
+	struct mutex		scan_mutex;
 	struct aac_queue_block *queues;
 	/*
 	 *	The user API will use an IOCTL to register itself to receive
@@ -1605,6 +1611,7 @@ struct aac_dev
 	int			maximum_num_channels;
 	struct fsa_dev_info	*fsa_dev;
 	struct task_struct	*thread;
+	struct delayed_work	safw_rescan_work;
 	int			cardtype;
 	/*
	 *This lock will protect the two 32-bit
@@ -1668,9 +1675,11 @@ struct aac_dev
 	u32			vector_cap;	/* MSI-X vector capab.*/
 	int			msi_enabled;	/* MSI/MSI-X enabled */
 	atomic_t		msix_counter;
+	u32			scan_counter;
 	struct msix_entry	msixentry[AAC_MAX_MSIX];
 	struct aac_msix_ctx	aac_msix[AAC_MAX_MSIX]; /* context */
 	struct aac_hba_map_info	hba_map[AAC_MAX_BUSES][AAC_MAX_TARGETS];
+	struct aac_ciss_phys_luns_resp *safw_phys_luns;
 	u8			adapter_shutdown;
 	u32			handle_pci_error;
 	bool			init_reset;
@@ -2023,6 +2032,12 @@ struct aac_srb_reply
 	__le32		sense_data_size;
 	u8		sense_data[AAC_SENSE_BUFFERSIZE]; // Can this be SCSI_SENSE_BUFFERSIZE
 };
+
+struct aac_srb_unit {
+	struct aac_srb		srb;
+	struct aac_srb_reply	srb_reply;
+};
+
 /*
  * SRB Flags
  */
@@ -2627,16 +2642,41 @@ static inline int aac_adapter_check_health(struct aac_dev *dev)
 	return (dev)->a_ops.adapter_check_health(dev);
 }
 
+int aac_scan_host(struct aac_dev *dev);
+
+static inline void aac_schedule_safw_scan_worker(struct aac_dev *dev)
+{
+	schedule_delayed_work(&dev->safw_rescan_work, AAC_SAFW_RESCAN_DELAY);
+}
+
+static inline void aac_safw_rescan_worker(struct work_struct *work)
+{
+	struct aac_dev *dev = container_of(to_delayed_work(work),
+		struct aac_dev, safw_rescan_work);
+
+	wait_event(dev->scsi_host_ptr->host_wait,
+		!scsi_host_in_recovery(dev->scsi_host_ptr));
+
+	aac_scan_host(dev);
+}
+
+static inline void aac_cancel_safw_rescan_worker(struct aac_dev *dev)
+{
+	if (dev->sa_firmware)
+		cancel_delayed_work_sync(&dev->safw_rescan_work);
+}
+
 /* SCp.phase values */
 #define AAC_OWNER_MIDLEVEL	0x101
 #define AAC_OWNER_LOWLEVEL	0x102
 #define AAC_OWNER_ERROR_HANDLER	0x103
 #define AAC_OWNER_FIRMWARE	0x106
 
+void aac_safw_rescan_worker(struct work_struct *work);
 int aac_acquire_irq(struct aac_dev *dev);
 void aac_free_irq(struct aac_dev *dev);
-int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan);
-int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target);
+int aac_setup_safw_adapter(struct aac_dev *dev);
 const char *aac_driverinfo(struct Scsi_Host *);
 void aac_fib_vector_assign(struct aac_dev *dev);
 struct fib *aac_fib_alloc(struct aac_dev *dev);
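aacraid's new rescan plumbing is a standard delayed-work pattern: schedule the
work with a fixed delay, and cancel it synchronously on teardown so the
callback cannot run against a dying adapter. A generic sketch under those
assumptions (structure and names are hypothetical):

    #include <linux/workqueue.h>

    struct demo_adapter {
        struct delayed_work rescan_work;
    };

    static void demo_rescan(struct work_struct *work)
    {
        struct demo_adapter *a = container_of(to_delayed_work(work),
                                              struct demo_adapter, rescan_work);
        if (!a)
            return;
        /* ... rescan the host represented by 'a' ... */
    }

    static void demo_init(struct demo_adapter *a)
    {
        INIT_DELAYED_WORK(&a->rescan_work, demo_rescan);
        schedule_delayed_work(&a->rescan_work, 10 * HZ);
    }

    static void demo_teardown(struct demo_adapter *a)
    {
        cancel_delayed_work_sync(&a->rescan_work);    /* waits if running */
    }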
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -1052,9 +1052,13 @@ static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg)
 	if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop)))
 		return -EFAULT;
 
-	retval = aac_reset_adapter(dev, 0, reset.reset_type);
-	return retval;
+	dev->adapter_shutdown = 1;
+
+	mutex_unlock(&dev->ioctl_mutex);
+	retval = aac_reset_adapter(dev, 0, reset.reset_type);
+	mutex_lock(&dev->ioctl_mutex);
+
+	return retval;
 }
 
 int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
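aac_send_reset_adapter() is reached with ioctl_mutex held; the change above
drops the lock around the slow, sleeping reset and retakes it afterwards, so
other ioctls are not stalled for the duration and the reset path can itself
take the mutex without deadlocking. The shape of the pattern (illustrative
only):

    #include <linux/mutex.h>

    static int demo_locked_op(struct mutex *lock)
    {
        int ret;

        /* caller holds 'lock' on entry */
        mutex_unlock(lock);
        ret = 0;    /* ... long-running operation that may take 'lock' ... */
        mutex_lock(lock);

        return ret;    /* caller still expects 'lock' held on return */
    }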
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -42,6 +42,8 @@
 #include <linux/completion.h>
 #include <linux/mm.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
 #include <scsi/scsi_cmnd.h>
 
 #include "aacraid.h"
@@ -284,6 +286,38 @@ static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
 	q->entries = qsize;
 }
 
+static void aac_wait_for_io_completion(struct aac_dev *aac)
+{
+	unsigned long flagv = 0;
+	int i = 0;
+
+	for (i = 60; i; --i) {
+		struct scsi_device *dev;
+		struct scsi_cmnd *command;
+		int active = 0;
+
+		__shost_for_each_device(dev, aac->scsi_host_ptr) {
+			spin_lock_irqsave(&dev->list_lock, flagv);
+			list_for_each_entry(command, &dev->cmd_list, list) {
+				if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
+					active++;
+					break;
+				}
+			}
+			spin_unlock_irqrestore(&dev->list_lock, flagv);
+			if (active)
+				break;
+
+		}
+		/*
+		 * We can exit If all the commands are complete
+		 */
+		if (active == 0)
+			break;
+		ssleep(1);
+	}
+}
+
 /**
  *	aac_send_shutdown - shutdown an adapter
  *	@dev: Adapter to shutdown
@@ -295,12 +329,10 @@ int aac_send_shutdown(struct aac_dev * dev)
 {
 	struct fib * fibctx;
 	struct aac_close *cmd;
-	int status;
+	int status = 0;
 
-	fibctx = aac_fib_alloc(dev);
-	if (!fibctx)
-		return -ENOMEM;
-	aac_fib_init(fibctx);
+	if (aac_adapter_check_health(dev))
+		return status;
 
 	if (!dev->adapter_shutdown) {
 		mutex_lock(&dev->ioctl_mutex);
@@ -308,6 +340,13 @@ int aac_send_shutdown(struct aac_dev * dev)
 		mutex_unlock(&dev->ioctl_mutex);
 	}
 
+	aac_wait_for_io_completion(dev);
+
+	fibctx = aac_fib_alloc(dev);
+	if (!fibctx)
+		return -ENOMEM;
+	aac_fib_init(fibctx);
+
 	cmd = (struct aac_close *) fib_data(fibctx);
 	cmd->command = cpu_to_le32(VM_CloseAll);
 	cmd->cid = cpu_to_le32(0xfffffffe);
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -33,6 +33,7 @@
 
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/crash_dump.h>
 #include <linux/types.h>
 #include <linux/sched.h>
 #include <linux/pci.h>
@@ -1629,28 +1630,28 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
 			command->scsi_done(command);
 		}
 		/*
-		 * Any Device that was already marked offline needs to be cleaned up
+		 * Any Device that was already marked offline needs to be marked
+		 * running
 		 */
 		__shost_for_each_device(dev, host) {
-			if (!scsi_device_online(dev)) {
-				sdev_printk(KERN_INFO, dev, "Removing offline device\n");
-				scsi_remove_device(dev);
-				scsi_device_put(dev);
-			}
+			if (!scsi_device_online(dev))
+				scsi_device_set_state(dev, SDEV_RUNNING);
 		}
 		retval = 0;
 
 out:
 	aac->in_reset = 0;
 	scsi_unblock_requests(host);
 
 	/*
	 * Issue bus rescan to catch any configuration that might have
	 * occurred
	 */
-	if (!retval) {
-		dev_info(&aac->pdev->dev, "Issuing bus rescan\n");
-		scsi_scan_host(host);
+	if (!retval && !is_kdump_kernel()) {
+		dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
+		aac_schedule_safw_scan_worker(aac);
 	}
 
 	if (jafo) {
 		spin_lock_irq(host->host_lock);
 	}
@@ -1681,31 +1682,6 @@ int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
 	 */
 	host = aac->scsi_host_ptr;
 	scsi_block_requests(host);
-	if (forced < 2) for (retval = 60; retval; --retval) {
-		struct scsi_device * dev;
-		struct scsi_cmnd * command;
-		int active = 0;
-
-		__shost_for_each_device(dev, host) {
-			spin_lock_irqsave(&dev->list_lock, flagv);
-			list_for_each_entry(command, &dev->cmd_list, list) {
-				if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
-					active++;
-					break;
-				}
-			}
-			spin_unlock_irqrestore(&dev->list_lock, flagv);
-			if (active)
-				break;
-
-		}
-		/*
-		 * We can exit If all the commands are complete
-		 */
-		if (active == 0)
-			break;
-		ssleep(1);
-	}
 
 	/* Quiesce build, flush cache, write through mode */
 	if (forced < 2)
@@ -1874,42 +1850,124 @@ out:
 	return BlinkLED;
 }
 
-static void aac_resolve_luns(struct aac_dev *dev)
+static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target)
+{
+	return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers;
+}
+
+static struct scsi_device *aac_lookup_safw_scsi_device(struct aac_dev *dev,
+							int bus,
+							int target)
+{
+	if (bus != CONTAINER_CHANNEL)
+		bus = aac_phys_to_logical(bus);
+
+	return scsi_device_lookup(dev->scsi_host_ptr, bus, target, 0);
+}
+
+static int aac_add_safw_device(struct aac_dev *dev, int bus, int target)
+{
+	if (bus != CONTAINER_CHANNEL)
+		bus = aac_phys_to_logical(bus);
+
+	return scsi_add_device(dev->scsi_host_ptr, bus, target, 0);
+}
+
+static void aac_put_safw_scsi_device(struct scsi_device *sdev)
+{
+	if (sdev)
+		scsi_device_put(sdev);
+}
+
+static void aac_remove_safw_device(struct aac_dev *dev, int bus, int target)
 {
-	int bus, target, channel;
 	struct scsi_device *sdev;
-	u8 devtype;
-	u8 new_devtype;
 
-	for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
-		for (target = 0; target < AAC_MAX_TARGETS; target++) {
+	sdev = aac_lookup_safw_scsi_device(dev, bus, target);
+	scsi_remove_device(sdev);
+	aac_put_safw_scsi_device(sdev);
+}
 
-			if (bus == CONTAINER_CHANNEL)
-				channel = CONTAINER_CHANNEL;
-			else
-				channel = aac_phys_to_logical(bus);
+static inline int aac_is_safw_scan_count_equal(struct aac_dev *dev,
+	int bus, int target)
+{
+	return dev->hba_map[bus][target].scan_counter == dev->scan_counter;
+}
 
-			devtype = dev->hba_map[bus][target].devtype;
-			new_devtype = dev->hba_map[bus][target].new_devtype;
+static int aac_is_safw_target_valid(struct aac_dev *dev, int bus, int target)
+{
+	if (is_safw_raid_volume(dev, bus, target))
+		return dev->fsa_dev[target].valid;
+	else
+		return aac_is_safw_scan_count_equal(dev, bus, target);
+}
 
-			sdev = scsi_device_lookup(dev->scsi_host_ptr, channel,
-					target, 0);
+static int aac_is_safw_device_exposed(struct aac_dev *dev, int bus, int target)
+{
+	int is_exposed = 0;
+	struct scsi_device *sdev;
 
-			if (!sdev && new_devtype)
-				scsi_add_device(dev->scsi_host_ptr, channel,
-						target, 0);
-			else if (sdev && new_devtype != devtype)
-				scsi_remove_device(sdev);
-			else if (sdev && new_devtype == devtype)
-				scsi_rescan_device(&sdev->sdev_gendev);
+	sdev = aac_lookup_safw_scsi_device(dev, bus, target);
+	if (sdev)
+		is_exposed = 1;
+	aac_put_safw_scsi_device(sdev);
 
-			if (sdev)
-				scsi_device_put(sdev);
+	return is_exposed;
+}
 
-			dev->hba_map[bus][target].devtype = new_devtype;
-		}
-	}
+static int aac_update_safw_host_devices(struct aac_dev *dev)
+{
+	int i;
+	int bus;
+	int target;
+	int is_exposed = 0;
+	int rcode = 0;
+
+	rcode = aac_setup_safw_adapter(dev);
+	if (unlikely(rcode < 0)) {
+		goto out;
+	}
+
+	for (i = 0; i < AAC_BUS_TARGET_LOOP; i++) {
+
+		bus = get_bus_number(i);
+		target = get_target_number(i);
+
+		is_exposed = aac_is_safw_device_exposed(dev, bus, target);
+
+		if (aac_is_safw_target_valid(dev, bus, target) && !is_exposed)
+			aac_add_safw_device(dev, bus, target);
+		else if (!aac_is_safw_target_valid(dev, bus, target) &&
+								is_exposed)
+			aac_remove_safw_device(dev, bus, target);
+	}
+out:
+	return rcode;
+}
+
+static int aac_scan_safw_host(struct aac_dev *dev)
+{
+	int rcode = 0;
+
+	rcode = aac_update_safw_host_devices(dev);
+	if (rcode)
+		aac_schedule_safw_scan_worker(dev);
+
+	return rcode;
+}
+
+int aac_scan_host(struct aac_dev *dev)
+{
+	int rcode = 0;
+
+	mutex_lock(&dev->scan_mutex);
+	if (dev->sa_firmware)
+		rcode = aac_scan_safw_host(dev);
+	else
+		scsi_scan_host(dev->scsi_host_ptr);
+	mutex_unlock(&dev->scan_mutex);
+
+	return rcode;
 }
 
 /**
@@ -1922,10 +1980,8 @@ static void aac_resolve_luns(struct aac_dev *dev)
  */
 static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
 {
-	int i, bus, target, container, rcode = 0;
+	int i;
 	u32 events = 0;
-	struct fib *fib;
-	struct scsi_device *sdev;
 
 	if (fibptr->hbacmd_size & SA_AIF_HOTPLUG)
 		events = SA_AIF_HOTPLUG;
@@ -1947,44 +2003,8 @@ static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
 	case SA_AIF_LDEV_CHANGE:
 	case SA_AIF_BPCFG_CHANGE:
 
-		fib = aac_fib_alloc(dev);
-		if (!fib) {
-			pr_err("aac_handle_sa_aif: out of memory\n");
-			return;
-		}
-		for (bus = 0; bus < AAC_MAX_BUSES; bus++)
-			for (target = 0; target < AAC_MAX_TARGETS; target++)
-				dev->hba_map[bus][target].new_devtype = 0;
+		aac_scan_host(dev);
 
-		rcode = aac_report_phys_luns(dev, fib, AAC_RESCAN);
-
-		if (rcode != -ERESTARTSYS)
-			aac_fib_free(fib);
-
-		aac_resolve_luns(dev);
-
-		if (events == SA_AIF_LDEV_CHANGE ||
-		    events == SA_AIF_BPCFG_CHANGE) {
-			aac_get_containers(dev);
-			for (container = 0; container <
-			dev->maximum_num_containers; ++container) {
-				sdev = scsi_device_lookup(dev->scsi_host_ptr,
-						CONTAINER_CHANNEL,
-						container, 0);
-				if (dev->fsa_dev[container].valid && !sdev) {
-					scsi_add_device(dev->scsi_host_ptr,
-							CONTAINER_CHANNEL,
-							container, 0);
-				} else if (!dev->fsa_dev[container].valid &&
-					sdev) {
-					scsi_remove_device(sdev);
-					scsi_device_put(sdev);
-				} else if (sdev) {
-					scsi_rescan_device(&sdev->sdev_gendev);
-					scsi_device_put(sdev);
-				}
-			}
-		}
 		break;
 
 	case SA_AIF_BPSTAT_CHANGE:
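The rescan above relies on a generation-counter idiom: bump one global counter
per scan, stamp every LUN reported by the latest CISS REPORT PHYS LUNS, and
treat stale stamps as "no longer present". That replaces the old per-device
new_devtype shadow state. A generic sketch of the idiom (all names are
illustrative, not driver API):

    struct demo_map {
        u32 scan_counter;
    };

    static u32 demo_generation;    /* bumped once per scan */

    static void demo_mark_seen(struct demo_map *m)
    {
        m->scan_counter = demo_generation;
    }

    static int demo_is_present(const struct demo_map *m)
    {
        return m->scan_counter == demo_generation;
    }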
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -683,6 +683,9 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
 	u32 bus, cid;
 	int ret = FAILED;
 
+	if (aac_adapter_check_health(aac))
+		return ret;
+
 	bus = aac_logical_to_phys(scmd_channel(cmd));
 	cid = scmd_id(cmd);
 	if (aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
@@ -690,7 +693,6 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
 		struct aac_hba_tm_req *tmf;
 		int status;
 		u64 address;
-		__le32 managed_request_id;
 
 		pr_err("%s: Host adapter abort request (%d,%d,%d,%d)\n",
 		 AAC_DRIVERNAME,
@@ -703,8 +705,6 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
 			    (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
 			    (fib->callback_data == cmd)) {
 				found = 1;
-				managed_request_id = ((struct aac_hba_cmd_req *)
-					fib->hw_fib_va)->request_id;
 				break;
 			}
 		}
@@ -1375,18 +1375,15 @@ static ssize_t aac_store_reset_adapter(struct device *device,
 				     const char *buf, size_t count)
 {
 	int retval = -EACCES;
-	int bled = 0;
-	struct aac_dev *aac;
-
 
 	if (!capable(CAP_SYS_ADMIN))
 		return retval;
 
-	aac = (struct aac_dev *)class_to_shost(device)->hostdata;
-	bled = buf[0] == '!' ? 1:0;
-	retval = aac_reset_adapter(aac, bled, IOP_HWSOFT_RESET);
+	retval = aac_reset_adapter(shost_priv(class_to_shost(device)),
+				   buf[0] == '!', IOP_HWSOFT_RESET);
 	if (retval >= 0)
 		retval = count;
 
 	return retval;
 }
@@ -1689,6 +1686,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	spin_lock_init(&aac->fib_lock);
 
 	mutex_init(&aac->ioctl_mutex);
+	mutex_init(&aac->scan_mutex);
+
+	INIT_DELAYED_WORK(&aac->safw_rescan_work, aac_safw_rescan_worker);
 	/*
 	 *	Map in the registers from the adapter.
 	 */
@@ -1792,7 +1792,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	error = scsi_add_host(shost, &pdev->dev);
 	if (error)
 		goto out_deinit;
-	scsi_scan_host(shost);
+
+	aac_scan_host(aac);
 
 	pci_enable_pcie_error_reporting(pdev);
 	pci_save_state(pdev);
@@ -1877,6 +1878,7 @@ static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
 	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
 
 	scsi_block_requests(shost);
+	aac_cancel_safw_rescan_worker(aac);
 	aac_send_shutdown(aac);
 
 	aac_release_resources(aac);
@@ -1935,6 +1937,7 @@ static void aac_remove_one(struct pci_dev *pdev)
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
 
+	aac_cancel_safw_rescan_worker(aac);
 	scsi_remove_host(shost);
 
 	__aac_shutdown(aac);
@@ -1992,6 +1995,7 @@ static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
 		aac->handle_pci_error = 1;
 
 		scsi_block_requests(aac->scsi_host_ptr);
+		aac_cancel_safw_rescan_worker(aac);
 		aac_flush_ios(aac);
 		aac_release_resources(aac);
 
@@ -2076,7 +2080,7 @@ static void aac_pci_resume(struct pci_dev *pdev)
 		if (sdev->sdev_state == SDEV_OFFLINE)
 			sdev->sdev_state = SDEV_RUNNING;
 	scsi_unblock_requests(aac->scsi_host_ptr);
-	scsi_scan_host(aac->scsi_host_ptr);
+	aac_scan_host(aac);
 	pci_save_state(pdev);
 
 	dev_err(&pdev->dev, "aacraid: PCI error - resume\n");
@@ -329,6 +329,22 @@ int aac_sa_init(struct aac_dev *dev)
instance = dev->id;
name = dev->name;

/*
* Fill in the function dispatch table.
*/

dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
dev->a_ops.adapter_notify = aac_sa_notify_adapter;
dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
dev->a_ops.adapter_check_health = aac_sa_check_health;
dev->a_ops.adapter_restart = aac_sa_restart_adapter;
dev->a_ops.adapter_start = aac_sa_start_adapter;
dev->a_ops.adapter_intr = aac_sa_intr;
dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
dev->a_ops.adapter_ioremap = aac_sa_ioremap;

if (aac_sa_ioremap(dev, dev->base_size)) {
printk(KERN_WARNING "%s: unable to map adapter.\n", name);
goto error_iounmap;
@@ -362,22 +378,6 @@ int aac_sa_init(struct aac_dev *dev)
msleep(1);
}

/*
* Fill in the function dispatch table.
*/

dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
dev->a_ops.adapter_notify = aac_sa_notify_adapter;
dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
dev->a_ops.adapter_check_health = aac_sa_check_health;
dev->a_ops.adapter_restart = aac_sa_restart_adapter;
dev->a_ops.adapter_start = aac_sa_start_adapter;
dev->a_ops.adapter_intr = aac_sa_intr;
dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
dev->a_ops.adapter_ioremap = aac_sa_ioremap;

/*
* First clear out all interrupts. Then enable the one's that
* we can handle.
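The two hunks above move the a_ops assignments ahead of the first aac_sa_ioremap() call, so the dispatch table is populated before anything can call through it. A minimal sketch of the same ops-table idiom, with purely illustrative names rather than aacraid's:

struct adapter;

struct adapter_ops {
	int  (*start)(struct adapter *dev);
	void (*notify)(struct adapter *dev, int event);
};

struct adapter {
	struct adapter_ops a_ops;
};

static int  demo_start(struct adapter *dev)             { return 0; }
static void demo_notify(struct adapter *dev, int event) { }

static int adapter_init(struct adapter *dev)
{
	/* Fill in the dispatch table before any entry point is used. */
	dev->a_ops.start  = demo_start;
	dev->a_ops.notify = demo_notify;

	return dev->a_ops.start(dev);	/* safe: table already populated */
}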
@@ -45,52 +45,57 @@
#include <linux/interrupt.h>
struct device_attribute;
/*The limit of outstanding scsi command that firmware can handle*/
#ifdef CONFIG_XEN
#define ARCMSR_MAX_FREECCB_NUM 160
#define ARCMSR_MAX_OUTSTANDING_CMD 155
#else
#define ARCMSR_MAX_FREECCB_NUM 320
#define ARCMSR_MAX_OUTSTANDING_CMD 255
#endif
#define ARCMSR_DRIVER_VERSION "v1.30.00.22-20151126"
#define ARCMSR_SCSI_INITIATOR_ID 255
#define ARCMSR_MAX_XFER_SECTORS 512
#define ARCMSR_MAX_XFER_SECTORS_B 4096
#define ARCMSR_MAX_XFER_SECTORS_C 304
#define ARCMSR_MAX_TARGETID 17
#define ARCMSR_MAX_TARGETLUN 8
#define ARCMSR_MAX_CMD_PERLUN ARCMSR_MAX_OUTSTANDING_CMD
#define ARCMSR_MAX_QBUFFER 4096
#define ARCMSR_DEFAULT_SG_ENTRIES 38
#define ARCMSR_MAX_HBB_POSTQUEUE 264
#define ARCMSR_MAX_FREECCB_NUM 1024
#define ARCMSR_MAX_OUTSTANDING_CMD 1024
#define ARCMSR_DEFAULT_OUTSTANDING_CMD 128
#define ARCMSR_MIN_OUTSTANDING_CMD 32
#define ARCMSR_DRIVER_VERSION "v1.40.00.04-20171130"
#define ARCMSR_SCSI_INITIATOR_ID 255
#define ARCMSR_MAX_XFER_SECTORS 512
#define ARCMSR_MAX_XFER_SECTORS_B 4096
#define ARCMSR_MAX_XFER_SECTORS_C 304
#define ARCMSR_MAX_TARGETID 17
#define ARCMSR_MAX_TARGETLUN 8
#define ARCMSR_MAX_CMD_PERLUN 128
#define ARCMSR_DEFAULT_CMD_PERLUN 32
#define ARCMSR_MIN_CMD_PERLUN 1
#define ARCMSR_MAX_QBUFFER 4096
#define ARCMSR_DEFAULT_SG_ENTRIES 38
#define ARCMSR_MAX_HBB_POSTQUEUE 264
#define ARCMSR_MAX_ARC1214_POSTQUEUE 256
#define ARCMSR_MAX_ARC1214_DONEQUEUE 257
#define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */
#define ARCMSR_CDB_SG_PAGE_LENGTH 256
#define ARCMSR_MAX_HBE_DONEQUEUE 512
#define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */
#define ARCMSR_CDB_SG_PAGE_LENGTH 256
#define ARCMST_NUM_MSIX_VECTORS 4
#ifndef PCI_DEVICE_ID_ARECA_1880
#define PCI_DEVICE_ID_ARECA_1880 0x1880
#endif
#define PCI_DEVICE_ID_ARECA_1880 0x1880
#endif
#ifndef PCI_DEVICE_ID_ARECA_1214
#define PCI_DEVICE_ID_ARECA_1214 0x1214
#define PCI_DEVICE_ID_ARECA_1214 0x1214
#endif
#ifndef PCI_DEVICE_ID_ARECA_1203
#define PCI_DEVICE_ID_ARECA_1203 0x1203
#define PCI_DEVICE_ID_ARECA_1203 0x1203
#endif
#ifndef PCI_DEVICE_ID_ARECA_1884
#define PCI_DEVICE_ID_ARECA_1884 0x1884
#endif
#define ARCMSR_HOURS (1000 * 60 * 60 * 4)
#define ARCMSR_MINUTES (1000 * 60 * 60)
/*
**********************************************************************************
**
**********************************************************************************
*/
#define ARC_SUCCESS 0
#define ARC_FAILURE 1
#define ARC_SUCCESS 0
#define ARC_FAILURE 1
/*
*******************************************************************************
** split 64bits dma addressing
*******************************************************************************
*/
#define dma_addr_hi32(addr) (uint32_t) ((addr>>16)>>16)
#define dma_addr_lo32(addr) (uint32_t) (addr & 0xffffffff)
#define dma_addr_hi32(addr) (uint32_t) ((addr>>16)>>16)
#define dma_addr_lo32(addr) (uint32_t) (addr & 0xffffffff)
/*
*******************************************************************************
** MESSAGE CONTROL CODE
@@ -130,7 +135,7 @@ struct CMD_MESSAGE_FIELD
#define FUNCTION_SAY_HELLO 0x0807
#define FUNCTION_SAY_GOODBYE 0x0808
#define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809
#define FUNCTION_GET_FIRMWARE_STATUS 0x080A
#define FUNCTION_GET_FIRMWARE_STATUS 0x080A
#define FUNCTION_HARDWARE_RESET 0x080B
/* ARECA IO CONTROL CODE*/
#define ARCMSR_MESSAGE_READ_RQBUFFER \
@@ -161,18 +166,18 @@ struct CMD_MESSAGE_FIELD
** structure for holding DMA address data
*************************************************************
*/
#define IS_DMA64 (sizeof(dma_addr_t) == 8)
#define IS_SG64_ADDR 0x01000000 /* bit24 */
#define IS_DMA64 (sizeof(dma_addr_t) == 8)
#define IS_SG64_ADDR 0x01000000 /* bit24 */
struct SG32ENTRY
{
__le32 length;
__le32 address;
__le32 length;
__le32 address;
}__attribute__ ((packed));
struct SG64ENTRY
{
__le32 length;
__le32 address;
__le32 addresshigh;
__le32 length;
__le32 address;
__le32 addresshigh;
}__attribute__ ((packed));
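SG32ENTRY and SG64ENTRY above are scatter-gather descriptors consumed by the adapter firmware, which is why they are declared packed. A sketch of the idea (for this particular layout the fixed-width fields happen to be naturally aligned, so packed mostly documents and enforces the wire contract):

#include <linux/types.h>

struct sg64_demo {
	__le32 length;
	__le32 address;		/* low 32 bits of the DMA address */
	__le32 addresshigh;	/* high 32 bits */
} __attribute__((packed));	/* exactly 12 bytes, matching firmware layout */

/* Without packed, the compiler is free to insert padding between or
 * after members to satisfy alignment rules; the firmware would then
 * read garbage at the offsets it expects. */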
/*
********************************************************************
@@ -191,50 +196,50 @@ struct QBUFFER
*/
struct FIRMWARE_INFO
{
uint32_t signature; /*0, 00-03*/
uint32_t request_len; /*1, 04-07*/
uint32_t numbers_queue; /*2, 08-11*/
uint32_t sdram_size; /*3, 12-15*/
uint32_t ide_channels; /*4, 16-19*/
char vendor[40]; /*5, 20-59*/
char model[8]; /*15, 60-67*/
char firmware_ver[16]; /*17, 68-83*/
char device_map[16]; /*21, 84-99*/
uint32_t cfgVersion; /*25,100-103 Added for checking of new firmware capability*/
uint8_t cfgSerial[16]; /*26,104-119*/
uint32_t cfgPicStatus; /*30,120-123*/
uint32_t signature; /*0, 00-03*/
uint32_t request_len; /*1, 04-07*/
uint32_t numbers_queue; /*2, 08-11*/
uint32_t sdram_size; /*3, 12-15*/
uint32_t ide_channels; /*4, 16-19*/
char vendor[40]; /*5, 20-59*/
char model[8]; /*15, 60-67*/
char firmware_ver[16]; /*17, 68-83*/
char device_map[16]; /*21, 84-99*/
uint32_t cfgVersion; /*25,100-103 Added for checking of new firmware capability*/
uint8_t cfgSerial[16]; /*26,104-119*/
uint32_t cfgPicStatus; /*30,120-123*/
};
/* signature of set and get firmware config */
#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060
#define ARCMSR_SIGNATURE_SET_CONFIG 0x87974063
#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060
#define ARCMSR_SIGNATURE_SET_CONFIG 0x87974063
/* message code of inbound message register */
#define ARCMSR_INBOUND_MESG0_NOP 0x00000000
#define ARCMSR_INBOUND_MESG0_GET_CONFIG 0x00000001
#define ARCMSR_INBOUND_MESG0_SET_CONFIG 0x00000002
#define ARCMSR_INBOUND_MESG0_ABORT_CMD 0x00000003
#define ARCMSR_INBOUND_MESG0_STOP_BGRB 0x00000004
#define ARCMSR_INBOUND_MESG0_FLUSH_CACHE 0x00000005
#define ARCMSR_INBOUND_MESG0_START_BGRB 0x00000006
#define ARCMSR_INBOUND_MESG0_CHK331PENDING 0x00000007
#define ARCMSR_INBOUND_MESG0_SYNC_TIMER 0x00000008
#define ARCMSR_INBOUND_MESG0_NOP 0x00000000
#define ARCMSR_INBOUND_MESG0_GET_CONFIG 0x00000001
#define ARCMSR_INBOUND_MESG0_SET_CONFIG 0x00000002
#define ARCMSR_INBOUND_MESG0_ABORT_CMD 0x00000003
#define ARCMSR_INBOUND_MESG0_STOP_BGRB 0x00000004
#define ARCMSR_INBOUND_MESG0_FLUSH_CACHE 0x00000005
#define ARCMSR_INBOUND_MESG0_START_BGRB 0x00000006
#define ARCMSR_INBOUND_MESG0_CHK331PENDING 0x00000007
#define ARCMSR_INBOUND_MESG0_SYNC_TIMER 0x00000008
/* doorbell interrupt generator */
#define ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK 0x00000001
#define ARCMSR_INBOUND_DRIVER_DATA_READ_OK 0x00000002
#define ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 0x00000001
#define ARCMSR_OUTBOUND_IOP331_DATA_READ_OK 0x00000002
#define ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK 0x00000001
#define ARCMSR_INBOUND_DRIVER_DATA_READ_OK 0x00000002
#define ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 0x00000001
#define ARCMSR_OUTBOUND_IOP331_DATA_READ_OK 0x00000002
/* ccb areca cdb flag */
#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000
#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000
#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000
#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE0 0x10000000
#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE1 0x00000001
#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000
#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000
#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000
#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE0 0x10000000
#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE1 0x00000001
/* outbound firmware ok */
#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000
#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000
/* ARC-1680 Bus Reset*/
#define ARCMSR_ARC1680_BUS_RESET 0x00000003
#define ARCMSR_ARC1680_BUS_RESET 0x00000003
/* ARC-1880 Bus Reset*/
#define ARCMSR_ARC1880_RESET_ADAPTER 0x00000024
#define ARCMSR_ARC1880_DiagWrite_ENABLE 0x00000080
#define ARCMSR_ARC1880_RESET_ADAPTER 0x00000024
#define ARCMSR_ARC1880_DiagWrite_ENABLE 0x00000080

/*
************************************************************************
@@ -277,9 +282,10 @@ struct FIRMWARE_INFO
#define ARCMSR_MESSAGE_FLUSH_CACHE 0x00050008
/* (ARCMSR_INBOUND_MESG0_START_BGRB<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
#define ARCMSR_MESSAGE_START_BGRB 0x00060008
#define ARCMSR_MESSAGE_SYNC_TIMER 0x00080008
#define ARCMSR_MESSAGE_START_DRIVER_MODE 0x000E0008
#define ARCMSR_MESSAGE_SET_POST_WINDOW 0x000F0008
#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008
#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008
/* ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK */
#define ARCMSR_MESSAGE_FIRMWARE_OK 0x80000000
/* ioctl transfer */
@@ -288,7 +294,7 @@ struct FIRMWARE_INFO
#define ARCMSR_DRV2IOP_DATA_READ_OK 0x00000002
#define ARCMSR_DRV2IOP_CDB_POSTED 0x00000004
#define ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED 0x00000008
#define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010
#define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010

/* data tunnel buffer between user space program and its firmware */
/* user space data to iop 128bytes */
@@ -313,12 +319,12 @@ struct FIRMWARE_INFO
#define ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK 0x00000008 /* When clear, the Outbound Post List FIFO Not Empty interrupt routes to the host.*/
#define ARCMSR_HBCMU_ALL_INTMASKENABLE 0x0000000D /* disable all ISR */
/* Host Interrupt Status */
#define ARCMSR_HBCMU_UTILITY_A_ISR 0x00000001
#define ARCMSR_HBCMU_UTILITY_A_ISR 0x00000001
/*
** Set when the Utility_A Interrupt bit is set in the Outbound Doorbell Register.
** It clears by writing a 1 to the Utility_A bit in the Outbound Doorbell Clear Register or through automatic clearing (if enabled).
*/
#define ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR 0x00000004
#define ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR 0x00000004
/*
** Set if Outbound Doorbell register bits 30:1 have a non-zero
** value. This bit clears only when Outbound Doorbell bits
@@ -331,7 +337,7 @@ struct FIRMWARE_INFO
** Register (FIFO) is not empty. It clears when the Outbound
** Post List FIFO is empty.
*/
#define ARCMSR_HBCMU_SAS_ALL_INT 0x00000010
#define ARCMSR_HBCMU_SAS_ALL_INT 0x00000010
/*
** This bit indicates a SAS interrupt from a source external to
** the PCIe core. This bit is not maskable.
@@ -340,17 +346,17 @@ struct FIRMWARE_INFO
#define ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK 0x00000002
#define ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK 0x00000004
/*inbound message 0 ready*/
#define ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008
#define ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008
/*more than 12 request completed in a time*/
#define ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING 0x00000010
#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK 0x00000002
/*outbound DATA WRITE isr door bell clear*/
#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_DOORBELL_CLEAR 0x00000002
#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_DOORBELL_CLEAR 0x00000002
#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK 0x00000004
/*outbound DATA READ isr door bell clear*/
#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_DOORBELL_CLEAR 0x00000004
#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_DOORBELL_CLEAR 0x00000004
/*outbound message 0 ready*/
#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
/*outbound message cmd isr door bell clear*/
#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR 0x00000008
/*ARCMSR_HBAMU_MESSAGE_FIRMWARE_OK*/
@@ -407,18 +413,43 @@ struct FIRMWARE_INFO
#define ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR 0x00000001
/*
*******************************************************************************
** SPEC. for Areca Type E adapter
*******************************************************************************
*/
#define ARCMSR_SIGNATURE_1884 0x188417D3

#define ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK 0x00000002
#define ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK 0x00000004
#define ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008

#define ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK 0x00000002
#define ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK 0x00000004
#define ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008

#define ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK 0x80000000

#define ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR 0x00000001
#define ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR 0x00000008
#define ARCMSR_HBEMU_ALL_INTMASKENABLE 0x00000009

/* ARC-1884 doorbell sync */
#define ARCMSR_HBEMU_DOORBELL_SYNC 0x100
#define ARCMSR_ARC188X_RESET_ADAPTER 0x00000004
#define ARCMSR_ARC1884_DiagWrite_ENABLE 0x00000080
/*
*******************************************************************************
** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504)
*******************************************************************************
*/
struct ARCMSR_CDB
{
uint8_t Bus;
uint8_t TargetID;
uint8_t LUN;
uint8_t Function;
uint8_t CdbLength;
uint8_t sgcount;
uint8_t Flags;
uint8_t Bus;
uint8_t TargetID;
uint8_t LUN;
uint8_t Function;
uint8_t CdbLength;
uint8_t sgcount;
uint8_t Flags;
#define ARCMSR_CDB_FLAG_SGL_BSIZE 0x01
#define ARCMSR_CDB_FLAG_BIOS 0x02
#define ARCMSR_CDB_FLAG_WRITE 0x04
@@ -426,21 +457,21 @@ struct ARCMSR_CDB
#define ARCMSR_CDB_FLAG_HEADQ 0x08
#define ARCMSR_CDB_FLAG_ORDEREDQ 0x10

uint8_t msgPages;
uint32_t msgContext;
uint32_t DataLength;
uint8_t Cdb[16];
uint8_t DeviceStatus;
uint8_t msgPages;
uint32_t msgContext;
uint32_t DataLength;
uint8_t Cdb[16];
uint8_t DeviceStatus;
#define ARCMSR_DEV_CHECK_CONDITION 0x02
#define ARCMSR_DEV_SELECT_TIMEOUT 0xF0
#define ARCMSR_DEV_ABORTED 0xF1
#define ARCMSR_DEV_INIT_FAIL 0xF2

uint8_t SenseData[15];
uint8_t SenseData[15];
union
{
struct SG32ENTRY sg32entry[1];
struct SG64ENTRY sg64entry[1];
struct SG32ENTRY sg32entry[1];
struct SG64ENTRY sg64entry[1];
} u;
};
/*
@@ -480,13 +511,13 @@ struct MessageUnit_B
uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE];
uint32_t postq_index;
uint32_t doneq_index;
uint32_t __iomem *drv2iop_doorbell;
uint32_t __iomem *drv2iop_doorbell_mask;
uint32_t __iomem *iop2drv_doorbell;
uint32_t __iomem *iop2drv_doorbell_mask;
uint32_t __iomem *message_rwbuffer;
uint32_t __iomem *message_wbuffer;
uint32_t __iomem *message_rbuffer;
uint32_t __iomem *drv2iop_doorbell;
uint32_t __iomem *drv2iop_doorbell_mask;
uint32_t __iomem *iop2drv_doorbell;
uint32_t __iomem *iop2drv_doorbell_mask;
uint32_t __iomem *message_rwbuffer;
uint32_t __iomem *message_wbuffer;
uint32_t __iomem *message_rbuffer;
};
/*
*********************************************************************
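MessageUnit_B above keeps its doorbell and message-buffer registers as uint32_t __iomem pointers. A hedged sketch of how such pointers are typically set up and used with ioremap() and readl()/writel() (this is not arcmsr's actual init path; the 0x20 offset and region size are assumed for illustration):

#include <linux/io.h>
#include <linux/errno.h>

static int demo_ring_doorbell(phys_addr_t bar_base)
{
	void __iomem *regs;
	u32 __iomem *doorbell;

	regs = ioremap(bar_base, 0x100);	/* map 256 bytes of MMIO */
	if (!regs)
		return -ENOMEM;

	doorbell = (u32 __iomem *)(regs + 0x20);	/* assumed register offset */
	writel(0x1, doorbell);	/* write through the mapping, never a plain deref */
	(void)readl(doorbell);	/* read back to flush the posted write */

	iounmap(regs);
	return 0;
}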
@@ -506,7 +537,7 @@ struct MessageUnit_C{
uint32_t diagnostic_rw_data; /*0024 0027*/
uint32_t diagnostic_rw_address_low; /*0028 002B*/
uint32_t diagnostic_rw_address_high; /*002C 002F*/
uint32_t host_int_status; /*0030 0033*/
uint32_t host_int_status; /*0030 0033*/
uint32_t host_int_mask; /*0034 0037*/
uint32_t dcr_data; /*0038 003B*/
uint32_t dcr_address; /*003C 003F*/
@@ -518,12 +549,12 @@ struct MessageUnit_C{
uint32_t iop_int_mask; /*0054 0057*/
uint32_t iop_inbound_queue_port; /*0058 005B*/
uint32_t iop_outbound_queue_port; /*005C 005F*/
uint32_t inbound_free_list_index; /*0060 0063*/
uint32_t inbound_post_list_index; /*0064 0067*/
uint32_t outbound_free_list_index; /*0068 006B*/
uint32_t outbound_post_list_index; /*006C 006F*/
uint32_t inbound_free_list_index; /*0060 0063*/
uint32_t inbound_post_list_index; /*0064 0067*/
uint32_t outbound_free_list_index; /*0068 006B*/
uint32_t outbound_post_list_index; /*006C 006F*/
uint32_t inbound_doorbell_clear; /*0070 0073*/
uint32_t i2o_message_unit_control; /*0074 0077*/
uint32_t i2o_message_unit_control; /*0074 0077*/
uint32_t last_used_message_source_address_low; /*0078 007B*/
uint32_t last_used_message_source_address_high; /*007C 007F*/
uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/
@@ -531,7 +562,7 @@ struct MessageUnit_C{
uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/
uint32_t utility_A_int_counter_timer; /*0098 009B*/
uint32_t outbound_doorbell; /*009C 009F*/
uint32_t outbound_doorbell_clear; /*00A0 00A3*/
uint32_t outbound_doorbell_clear; /*00A0 00A3*/
uint32_t message_source_address_index; /*00A4 00A7*/
uint32_t message_done_queue_index; /*00A8 00AB*/
uint32_t reserved0; /*00AC 00AF*/
@@ -553,10 +584,10 @@ struct MessageUnit_C{
uint32_t last_used_message_dest_address_high; /*00EC 00EF*/
uint32_t message_done_queue_base_address_low; /*00F0 00F3*/
uint32_t message_done_queue_base_address_high; /*00F4 00F7*/
uint32_t host_diagnostic; /*00F8 00FB*/
uint32_t host_diagnostic; /*00F8 00FB*/
uint32_t write_sequence; /*00FC 00FF*/
uint32_t reserved1[34]; /*0100 0187*/
uint32_t reserved2[1950]; /*0188 1FFF*/
uint32_t reserved2[1950]; /*0188 1FFF*/
uint32_t message_wbuffer[32]; /*2000 207F*/
uint32_t reserved3[32]; /*2080 20FF*/
uint32_t message_rbuffer[32]; /*2100 217F*/
@@ -614,115 +645,208 @@ struct MessageUnit_D {
u32 __iomem *msgcode_rwbuffer; /* 0x2200 */
};
/*
*********************************************************************
** Messaging Unit (MU) of Type E processor(LSI)
*********************************************************************
*/
struct MessageUnit_E{
uint32_t iobound_doorbell; /*0000 0003*/
uint32_t write_sequence_3xxx; /*0004 0007*/
uint32_t host_diagnostic_3xxx; /*0008 000B*/
uint32_t posted_outbound_doorbell; /*000C 000F*/
uint32_t master_error_attribute; /*0010 0013*/
uint32_t master_error_address_low; /*0014 0017*/
uint32_t master_error_address_high; /*0018 001B*/
uint32_t hcb_size; /*001C 001F*/
uint32_t inbound_doorbell; /*0020 0023*/
uint32_t diagnostic_rw_data; /*0024 0027*/
uint32_t diagnostic_rw_address_low; /*0028 002B*/
uint32_t diagnostic_rw_address_high; /*002C 002F*/
uint32_t host_int_status; /*0030 0033*/
uint32_t host_int_mask; /*0034 0037*/
uint32_t dcr_data; /*0038 003B*/
uint32_t dcr_address; /*003C 003F*/
uint32_t inbound_queueport; /*0040 0043*/
uint32_t outbound_queueport; /*0044 0047*/
uint32_t hcb_pci_address_low; /*0048 004B*/
uint32_t hcb_pci_address_high; /*004C 004F*/
uint32_t iop_int_status; /*0050 0053*/
uint32_t iop_int_mask; /*0054 0057*/
uint32_t iop_inbound_queue_port; /*0058 005B*/
uint32_t iop_outbound_queue_port; /*005C 005F*/
uint32_t inbound_free_list_index; /*0060 0063*/
uint32_t inbound_post_list_index; /*0064 0067*/
uint32_t reply_post_producer_index; /*0068 006B*/
uint32_t reply_post_consumer_index; /*006C 006F*/
uint32_t inbound_doorbell_clear; /*0070 0073*/
uint32_t i2o_message_unit_control; /*0074 0077*/
uint32_t last_used_message_source_address_low; /*0078 007B*/
uint32_t last_used_message_source_address_high; /*007C 007F*/
uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/
uint32_t message_dest_address_index; /*0090 0093*/
uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/
uint32_t utility_A_int_counter_timer; /*0098 009B*/
uint32_t outbound_doorbell; /*009C 009F*/
uint32_t outbound_doorbell_clear; /*00A0 00A3*/
uint32_t message_source_address_index; /*00A4 00A7*/
uint32_t message_done_queue_index; /*00A8 00AB*/
uint32_t reserved0; /*00AC 00AF*/
uint32_t inbound_msgaddr0; /*00B0 00B3*/
uint32_t inbound_msgaddr1; /*00B4 00B7*/
uint32_t outbound_msgaddr0; /*00B8 00BB*/
uint32_t outbound_msgaddr1; /*00BC 00BF*/
uint32_t inbound_queueport_low; /*00C0 00C3*/
uint32_t inbound_queueport_high; /*00C4 00C7*/
uint32_t outbound_queueport_low; /*00C8 00CB*/
uint32_t outbound_queueport_high; /*00CC 00CF*/
uint32_t iop_inbound_queue_port_low; /*00D0 00D3*/
uint32_t iop_inbound_queue_port_high; /*00D4 00D7*/
uint32_t iop_outbound_queue_port_low; /*00D8 00DB*/
uint32_t iop_outbound_queue_port_high; /*00DC 00DF*/
uint32_t message_dest_queue_port_low; /*00E0 00E3*/
uint32_t message_dest_queue_port_high; /*00E4 00E7*/
uint32_t last_used_message_dest_address_low; /*00E8 00EB*/
uint32_t last_used_message_dest_address_high; /*00EC 00EF*/
uint32_t message_done_queue_base_address_low; /*00F0 00F3*/
uint32_t message_done_queue_base_address_high; /*00F4 00F7*/
uint32_t host_diagnostic; /*00F8 00FB*/
uint32_t write_sequence; /*00FC 00FF*/
uint32_t reserved1[34]; /*0100 0187*/
uint32_t reserved2[1950]; /*0188 1FFF*/
uint32_t message_wbuffer[32]; /*2000 207F*/
uint32_t reserved3[32]; /*2080 20FF*/
uint32_t message_rbuffer[32]; /*2100 217F*/
uint32_t reserved4[32]; /*2180 21FF*/
uint32_t msgcode_rwbuffer[256]; /*2200 23FF*/
};

typedef struct deliver_completeQ {
uint16_t cmdFlag;
uint16_t cmdSMID;
uint16_t cmdLMID; // reserved (0)
uint16_t cmdFlag2; // reserved (0)
} DeliverQ, CompletionQ, *pDeliver_Q, *pCompletion_Q;
/*
*******************************************************************************
** Adapter Control Block
*******************************************************************************
*/
struct AdapterControlBlock
{
uint32_t adapter_type; /* adapter A,B..... */
#define ACB_ADAPTER_TYPE_A 0x00000001 /* hba I IOP */
#define ACB_ADAPTER_TYPE_B 0x00000002 /* hbb M IOP */
#define ACB_ADAPTER_TYPE_C 0x00000004 /* hbc P IOP */
#define ACB_ADAPTER_TYPE_D 0x00000008 /* hbd A IOP */
u32 roundup_ccbsize;
struct pci_dev * pdev;
struct Scsi_Host * host;
unsigned long vir2phy_offset;
uint32_t adapter_type; /* adapter A,B..... */
#define ACB_ADAPTER_TYPE_A 0x00000000 /* hba I IOP */
#define ACB_ADAPTER_TYPE_B 0x00000001 /* hbb M IOP */
#define ACB_ADAPTER_TYPE_C 0x00000002 /* hbc L IOP */
#define ACB_ADAPTER_TYPE_D 0x00000003 /* hbd M IOP */
#define ACB_ADAPTER_TYPE_E 0x00000004 /* hba L IOP */
u32 roundup_ccbsize;
struct pci_dev * pdev;
struct Scsi_Host * host;
unsigned long vir2phy_offset;
/* Offset is used in making arc cdb physical to virtual calculations */
uint32_t outbound_int_enable;
uint32_t cdb_phyaddr_hi32;
uint32_t reg_mu_acc_handle0;
spinlock_t eh_lock;
spinlock_t ccblist_lock;
spinlock_t postq_lock;
spinlock_t doneq_lock;
spinlock_t rqbuffer_lock;
spinlock_t wqbuffer_lock;
uint32_t outbound_int_enable;
uint32_t cdb_phyaddr_hi32;
uint32_t reg_mu_acc_handle0;
spinlock_t eh_lock;
spinlock_t ccblist_lock;
spinlock_t postq_lock;
spinlock_t doneq_lock;
spinlock_t rqbuffer_lock;
spinlock_t wqbuffer_lock;
union {
struct MessageUnit_A __iomem *pmuA;
struct MessageUnit_B *pmuB;
struct MessageUnit_C __iomem *pmuC;
struct MessageUnit_D *pmuD;
struct MessageUnit_E __iomem *pmuE;
};
/* message unit ATU inbound base address0 */
void __iomem *mem_base0;
void __iomem *mem_base1;
uint32_t acb_flags;
void __iomem *mem_base0;
void __iomem *mem_base1;
uint32_t acb_flags;
u16 dev_id;
uint8_t adapter_index;
#define ACB_F_SCSISTOPADAPTER 0x0001
#define ACB_F_MSG_STOP_BGRB 0x0002
/* stop RAID background rebuild */
#define ACB_F_MSG_START_BGRB 0x0004
/* stop RAID background rebuild */
#define ACB_F_IOPDATA_OVERFLOW 0x0008
/* iop message data rqbuffer overflow */
#define ACB_F_MESSAGE_WQBUFFER_CLEARED 0x0010
/* message clear wqbuffer */
#define ACB_F_MESSAGE_RQBUFFER_CLEARED 0x0020
/* message clear rqbuffer */
#define ACB_F_MESSAGE_WQBUFFER_READED 0x0040
#define ACB_F_BUS_RESET 0x0080
#define ACB_F_BUS_HANG_ON 0x0800/* need hardware reset bus */
uint8_t adapter_index;
#define ACB_F_SCSISTOPADAPTER 0x0001
#define ACB_F_MSG_STOP_BGRB 0x0002
/* stop RAID background rebuild */
#define ACB_F_MSG_START_BGRB 0x0004
/* stop RAID background rebuild */
#define ACB_F_IOPDATA_OVERFLOW 0x0008
/* iop message data rqbuffer overflow */
#define ACB_F_MESSAGE_WQBUFFER_CLEARED 0x0010
/* message clear wqbuffer */
#define ACB_F_MESSAGE_RQBUFFER_CLEARED 0x0020
/* message clear rqbuffer */
#define ACB_F_MESSAGE_WQBUFFER_READED 0x0040
#define ACB_F_BUS_RESET 0x0080
#define ACB_F_BUS_HANG_ON 0x0800/* need hardware reset bus */

#define ACB_F_IOP_INITED 0x0100
/* iop init */
#define ACB_F_ABORT 0x0200
#define ACB_F_FIRMWARE_TRAP 0x0400
struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
#define ACB_F_IOP_INITED 0x0100
/* iop init */
#define ACB_F_ABORT 0x0200
#define ACB_F_FIRMWARE_TRAP 0x0400
#define ACB_F_MSG_GET_CONFIG 0x1000
struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
/* used for memory free */
struct list_head ccb_free_list;
struct list_head ccb_free_list;
/* head of free ccb list */

atomic_t ccboutstandingcount;
atomic_t ccboutstandingcount;
/*The present outstanding command number that in the IOP that
waiting for being handled by FW*/

void * dma_coherent;
void * dma_coherent;
/* dma_coherent used for memory free */
dma_addr_t dma_coherent_handle;
dma_addr_t dma_coherent_handle;
/* dma_coherent_handle used for memory free */
dma_addr_t dma_coherent_handle2;
void *dma_coherent2;
unsigned int uncache_size;
uint8_t rqbuffer[ARCMSR_MAX_QBUFFER];
dma_addr_t dma_coherent_handle2;
void *dma_coherent2;
unsigned int uncache_size;
uint8_t rqbuffer[ARCMSR_MAX_QBUFFER];
/* data collection buffer for read from 80331 */
int32_t rqbuf_getIndex;
int32_t rqbuf_getIndex;
/* first of read buffer */
int32_t rqbuf_putIndex;
int32_t rqbuf_putIndex;
/* last of read buffer */
uint8_t wqbuffer[ARCMSR_MAX_QBUFFER];
uint8_t wqbuffer[ARCMSR_MAX_QBUFFER];
/* data collection buffer for write to 80331 */
int32_t wqbuf_getIndex;
int32_t wqbuf_getIndex;
/* first of write buffer */
int32_t wqbuf_putIndex;
int32_t wqbuf_putIndex;
/* last of write buffer */
uint8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN];
uint8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN];
/* id0 ..... id15, lun0...lun7 */
#define ARECA_RAID_GONE 0x55
#define ARECA_RAID_GOOD 0xaa
uint32_t num_resets;
uint32_t num_aborts;
uint32_t signature;
uint32_t firm_request_len;
uint32_t firm_numbers_queue;
uint32_t firm_sdram_size;
uint32_t firm_hd_channels;
uint32_t firm_cfg_version;
#define ARECA_RAID_GONE 0x55
#define ARECA_RAID_GOOD 0xaa
uint32_t num_resets;
uint32_t num_aborts;
uint32_t signature;
uint32_t firm_request_len;
uint32_t firm_numbers_queue;
uint32_t firm_sdram_size;
uint32_t firm_hd_channels;
uint32_t firm_cfg_version;
char firm_model[12];
char firm_version[20];
char device_map[20]; /*21,84-99*/
struct work_struct arcmsr_do_message_isr_bh;
struct timer_list eternal_timer;
struct work_struct arcmsr_do_message_isr_bh;
struct timer_list eternal_timer;
unsigned short fw_flag;
#define FW_NORMAL 0x0000
#define FW_BOG 0x0001
#define FW_DEADLOCK 0x0010
atomic_t rq_map_token;
atomic_t ante_token_value;
uint32_t maxOutstanding;
int vector_count;
#define FW_NORMAL 0x0000
#define FW_BOG 0x0001
#define FW_DEADLOCK 0x0010
atomic_t rq_map_token;
atomic_t ante_token_value;
uint32_t maxOutstanding;
int vector_count;
uint32_t maxFreeCCB;
struct timer_list refresh_timer;
uint32_t doneq_index;
uint32_t ccbsize;
uint32_t in_doorbell;
uint32_t out_doorbell;
uint32_t completionQ_entry;
pCompletion_Q pCompletionQ;
};/* HW_DEVICE_EXTENSION */
/*
*******************************************************************************
@@ -732,29 +856,30 @@ struct AdapterControlBlock
*/
struct CommandControlBlock{
/*x32:sizeof struct_CCB=(32+60)byte, x64:sizeof struct_CCB=(64+60)byte*/
struct list_head list; /*x32: 8byte, x64: 16byte*/
struct scsi_cmnd *pcmd; /*8 bytes pointer of linux scsi command */
struct AdapterControlBlock *acb; /*x32: 4byte, x64: 8byte*/
uint32_t cdb_phyaddr; /*x32: 4byte, x64: 4byte*/
uint32_t arc_cdb_size; /*x32:4byte,x64:4byte*/
uint16_t ccb_flags; /*x32: 2byte, x64: 2byte*/
#define CCB_FLAG_READ 0x0000
#define CCB_FLAG_WRITE 0x0001
#define CCB_FLAG_ERROR 0x0002
#define CCB_FLAG_FLUSHCACHE 0x0004
#define CCB_FLAG_MASTER_ABORTED 0x0008
uint16_t startdone; /*x32:2byte,x32:2byte*/
#define ARCMSR_CCB_DONE 0x0000
#define ARCMSR_CCB_START 0x55AA
#define ARCMSR_CCB_ABORTED 0xAA55
#define ARCMSR_CCB_ILLEGAL 0xFFFF
#if BITS_PER_LONG == 64
struct list_head list; /*x32: 8byte, x64: 16byte*/
struct scsi_cmnd *pcmd; /*8 bytes pointer of linux scsi command */
struct AdapterControlBlock *acb; /*x32: 4byte, x64: 8byte*/
uint32_t cdb_phyaddr; /*x32: 4byte, x64: 4byte*/
uint32_t arc_cdb_size; /*x32:4byte,x64:4byte*/
uint16_t ccb_flags; /*x32: 2byte, x64: 2byte*/
#define CCB_FLAG_READ 0x0000
#define CCB_FLAG_WRITE 0x0001
#define CCB_FLAG_ERROR 0x0002
#define CCB_FLAG_FLUSHCACHE 0x0004
#define CCB_FLAG_MASTER_ABORTED 0x0008
uint16_t startdone; /*x32:2byte,x32:2byte*/
#define ARCMSR_CCB_DONE 0x0000
#define ARCMSR_CCB_START 0x55AA
#define ARCMSR_CCB_ABORTED 0xAA55
#define ARCMSR_CCB_ILLEGAL 0xFFFF
uint32_t smid;
#if BITS_PER_LONG == 64
/* ======================512+64 bytes======================== */
uint32_t reserved[5]; /*24 byte*/
#else
uint32_t reserved[4]; /*16 byte*/
#else
/* ======================512+32 bytes======================== */
uint32_t reserved; /*8 byte*/
#endif
// uint32_t reserved; /*4 byte*/
#endif
/* ======================================================= */
struct ARCMSR_CDB arcmsr_cdb;
};
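In the CommandControlBlock hunk above, the reserved[] arrays shrink by one element to absorb the new uint32_t smid, so each build keeps its intended overall CCB footprint (512+64 bytes on 64-bit, 512+32 on 32-bit). A self-contained sketch of that BITS_PER_LONG padding idiom, with illustrative field names and sizes chosen so both builds land on 32 bytes:

#include <asm/bitsperlong.h>	/* BITS_PER_LONG */
#include <linux/types.h>

struct ccb_demo {
	void *pcmd;		/* 8 bytes on 64-bit, 4 on 32-bit */
	u32 smid;
#if BITS_PER_LONG == 64
	u32 reserved[5];	/* 8 + 4 + 20 = 32 bytes */
#else
	u32 reserved[6];	/* 4 + 4 + 24 = 32 bytes */
#endif
};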
@@ -788,13 +913,13 @@ struct SENSE_DATA
** Outbound Interrupt Status Register - OISR
*******************************************************************************
*/
#define ARCMSR_MU_OUTBOUND_INTERRUPT_STATUS_REG 0x30
#define ARCMSR_MU_OUTBOUND_PCI_INT 0x10
#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INT 0x08
#define ARCMSR_MU_OUTBOUND_DOORBELL_INT 0x04
#define ARCMSR_MU_OUTBOUND_MESSAGE1_INT 0x02
#define ARCMSR_MU_OUTBOUND_MESSAGE0_INT 0x01
#define ARCMSR_MU_OUTBOUND_HANDLE_INT \
#define ARCMSR_MU_OUTBOUND_INTERRUPT_STATUS_REG 0x30
#define ARCMSR_MU_OUTBOUND_PCI_INT 0x10
#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INT 0x08
#define ARCMSR_MU_OUTBOUND_DOORBELL_INT 0x04
#define ARCMSR_MU_OUTBOUND_MESSAGE1_INT 0x02
#define ARCMSR_MU_OUTBOUND_MESSAGE0_INT 0x01
#define ARCMSR_MU_OUTBOUND_HANDLE_INT \
(ARCMSR_MU_OUTBOUND_MESSAGE0_INT \
|ARCMSR_MU_OUTBOUND_MESSAGE1_INT \
|ARCMSR_MU_OUTBOUND_DOORBELL_INT \
@@ -805,13 +930,13 @@ struct SENSE_DATA
** Outbound Interrupt Mask Register - OIMR
*******************************************************************************
*/
#define ARCMSR_MU_OUTBOUND_INTERRUPT_MASK_REG 0x34
#define ARCMSR_MU_OUTBOUND_PCI_INTMASKENABLE 0x10
#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 0x08
#define ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE 0x04
#define ARCMSR_MU_OUTBOUND_MESSAGE1_INTMASKENABLE 0x02
#define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE 0x01
#define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE 0x1F
#define ARCMSR_MU_OUTBOUND_INTERRUPT_MASK_REG 0x34
#define ARCMSR_MU_OUTBOUND_PCI_INTMASKENABLE 0x10
#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 0x08
#define ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE 0x04
#define ARCMSR_MU_OUTBOUND_MESSAGE1_INTMASKENABLE 0x02
#define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE 0x01
#define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE 0x1F

extern void arcmsr_write_ioctldata2iop(struct AdapterControlBlock *);
extern uint32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *,
(File diff suppressed because it is too large)
@@ -2011,7 +2011,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
* have valid data in the sense buffer that could
* confuse the higher levels.
*/
memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
//printk("scsi%d.%c: sense buffer: ", info->host->host_no, '0' + SCpnt->device->id);
//{ int i; for (i = 0; i < 32; i++) printk("%02x ", SCpnt->sense_buffer[i]); printk("\n"); }
/*
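The fas216 fix above matters because sense_buffer is a pointer member, so sizeof() on it yields the pointer size (4 or 8), not the SCSI_SENSE_BUFFERSIZE (96) bytes of actual sense data. A self-contained sketch of the pitfall:

#include <string.h>

#define SENSE_SIZE 96	/* stands in for SCSI_SENSE_BUFFERSIZE */

static void demo(char *sense_buffer)
{
	/* WRONG: sense_buffer is a pointer here, so sizeof() is 4 or 8
	 * and only the first few bytes get cleared:
	 *
	 *	memset(sense_buffer, 0, sizeof(sense_buffer));
	 */

	/* RIGHT: use the buffer's real length. */
	memset(sense_buffer, 0, SENSE_SIZE);
}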
@@ -1957,7 +1957,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
};

*npciids = sizeof(__pciids) / sizeof(__pciids[0]);
*npciids = ARRAY_SIZE(__pciids);
*pciids = __pciids;
}

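ARRAY_SIZE() above is the kernel's standard replacement for the open-coded sizeof division; besides reading better, the kernel macro fails to compile when handed a pointer rather than a true array. Minimal sketch (device IDs here are just example values):

#include <linux/kernel.h>	/* ARRAY_SIZE() */

static const int ids[] = { 0x1880, 0x1214, 0x1203 };

static int demo_count(void)
{
	/* Equivalent to sizeof(ids) / sizeof((ids)[0]), plus a
	 * build-time check that ids really is an array. */
	return ARRAY_SIZE(ids);
}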
@@ -35,10 +35,10 @@

#define BFA_TRC_TS(_trcm) \
({ \
struct timeval tv; \
struct timespec64 ts; \
\
do_gettimeofday(&tv); \
(tv.tv_sec*1000000+tv.tv_usec); \
ktime_get_ts64(&ts); \
(ts.tv_sec*1000000+ts.tv_nsec / 1000); \
})

#ifndef BFA_TRC_TS
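The BFA_TRC_TS conversion above swaps the struct timeval (32-bit seconds on 32-bit architectures) for struct timespec64. A sketch of the same microsecond-timestamp pattern as a plain function (the helper name is illustrative):

#include <linux/ktime.h>
#include <linux/timekeeping.h>

static u64 demo_trace_usecs(void)
{
	struct timespec64 ts;

	ktime_get_ts64(&ts);	/* monotonic clock, 64-bit seconds, y2038-safe */
	return ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000;
}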
@@ -1455,7 +1455,8 @@ struct bfa_aen_entry_s {
enum bfa_aen_category aen_category;
u32 aen_type;
union bfa_aen_data_u aen_data;
struct timeval aen_tv;
u64 aen_tv_sec;
u64 aen_tv_usec;
u32 seq_num;
u32 bfad_num;
};
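Splitting aen_tv into explicit u64 seconds/microseconds above removes the embedded 32-bit time_t from the event structure. One way such fields can be filled, sketched with ktime_get_real_ts64() (an assumption for illustration, not necessarily how bfa populates them):

#include <linux/timekeeping.h>

struct aen_demo {
	u64 aen_tv_sec;
	u64 aen_tv_usec;
};

static void demo_stamp(struct aen_demo *aen)
{
	struct timespec64 ts;

	ktime_get_real_ts64(&ts);	/* wall-clock time, 64-bit seconds */
	aen->aen_tv_sec = ts.tv_sec;
	aen->aen_tv_usec = ts.tv_nsec / NSEC_PER_USEC;
}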
@@ -1250,8 +1250,8 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));

rspnid->dap = s_id;
rspnid->spn_len = (u8) strlen((char *)name);
strncpy((char *)rspnid->spn, (char *)name, rspnid->spn_len);
strlcpy(rspnid->spn, name, sizeof(rspnid->spn));
rspnid->spn_len = (u8) strlen(rspnid->spn);

return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s);
}
@@ -1271,8 +1271,8 @@ fc_rsnn_nn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
memset(rsnn_nn, 0, sizeof(struct fcgs_rsnn_nn_req_s));

rsnn_nn->node_name = node_name;
rsnn_nn->snn_len = (u8) strlen((char *)name);
strncpy((char *)rsnn_nn->snn, (char *)name, rsnn_nn->snn_len);
strlcpy(rsnn_nn->snn, name, sizeof(rsnn_nn->snn));
rsnn_nn->snn_len = (u8) strlen(rsnn_nn->snn);

return sizeof(struct fcgs_rsnn_nn_req_s) + sizeof(struct ct_hdr_s);
}
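Both hunks above follow the same recipe: copy with strlcpy() bounded by the destination's size, then derive the length from the now guaranteed NUL-terminated destination. A sketch of the difference:

#include <linux/string.h>

static void demo_copy(char *dst, const char *src)
{
	/* strncpy(dst, src, strlen(src));
	 *
	 * BAD: the bound comes from the source, so a long src overruns
	 * dst, and strncpy never guarantees NUL termination anyway. */

	/* strlcpy() is bounded by the destination and always
	 * NUL-terminates; an over-long src is safely truncated.
	 * (16 stands in for the destination buffer's size.) */
	strlcpy(dst, src, 16);
}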
@@ -468,7 +468,7 @@ bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
}

bfa_status_t
bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time)
{
struct bfa_itnim_s *itnim;
struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
@@ -1478,6 +1478,7 @@ bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
return BFA_STATUS_IOPROFILE_OFF;

itnim->ioprofile.index = BFA_IOBUCKET_MAX;
/* unsigned 32-bit time_t overflow here in y2106 */
itnim->ioprofile.io_profile_start_time =
bfa_io_profile_start_time(itnim->bfa);
itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
@@ -136,7 +136,7 @@ struct bfa_fcpim_s {
struct bfa_fcpim_del_itn_stats_s del_itn_stats;
bfa_boolean_t ioredirect;
bfa_boolean_t io_profile;
u32 io_profile_start_time;
time64_t io_profile_start_time;
bfa_fcpim_profile_t profile_comp;
bfa_fcpim_profile_t profile_start;
};
@@ -310,7 +310,7 @@ bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
struct bfa_itnim_iostats_s *stats, u8 lp_tag);
void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
struct bfa_itnim_iostats_s *itnim_stats);
bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time);
bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);

#define bfa_fcpim_ioredirect_enabled(__bfa) \
@@ -769,23 +769,23 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);

/* Model name/number */
strncpy((char *)&port_cfg->sym_name, model,
BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
strlcpy(port_cfg->sym_name.symname, model,
BFA_SYMNAME_MAXLEN);
strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
BFA_SYMNAME_MAXLEN);

/* Driver Version */
strncat((char *)&port_cfg->sym_name, (char *)driver_info->version,
BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
strlcat(port_cfg->sym_name.symname, driver_info->version,
BFA_SYMNAME_MAXLEN);
strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
BFA_SYMNAME_MAXLEN);

/* Host machine name */
strncat((char *)&port_cfg->sym_name,
(char *)driver_info->host_machine_name,
BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
strlcat(port_cfg->sym_name.symname,
driver_info->host_machine_name,
BFA_SYMNAME_MAXLEN);
strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
BFA_SYMNAME_MAXLEN);

/*
* Host OS Info :
@@ -793,24 +793,24 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
* OS name string and instead copy the entire OS info string (64 bytes).
*/
if (driver_info->host_os_patch[0] == '\0') {
strncat((char *)&port_cfg->sym_name,
(char *)driver_info->host_os_name,
BFA_FCS_OS_STR_LEN);
strncat((char *)&port_cfg->sym_name,
strlcat(port_cfg->sym_name.symname,
driver_info->host_os_name,
BFA_SYMNAME_MAXLEN);
strlcat(port_cfg->sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
BFA_SYMNAME_MAXLEN);
} else {
strncat((char *)&port_cfg->sym_name,
(char *)driver_info->host_os_name,
BFA_FCS_PORT_SYMBNAME_OSINFO_SZ);
strncat((char *)&port_cfg->sym_name,
strlcat(port_cfg->sym_name.symname,
driver_info->host_os_name,
BFA_SYMNAME_MAXLEN);
strlcat(port_cfg->sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
BFA_SYMNAME_MAXLEN);

/* Append host OS Patch Info */
strncat((char *)&port_cfg->sym_name,
(char *)driver_info->host_os_patch,
BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ);
strlcat(port_cfg->sym_name.symname,
driver_info->host_os_patch,
BFA_SYMNAME_MAXLEN);
}

/* null terminate */
@@ -830,26 +830,26 @@ bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric)
bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);

/* Model name/number */
strncpy((char *)&port_cfg->node_sym_name, model,
BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
strncat((char *)&port_cfg->node_sym_name,
strlcpy(port_cfg->node_sym_name.symname, model,
BFA_SYMNAME_MAXLEN);
strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
BFA_SYMNAME_MAXLEN);

/* Driver Version */
strncat((char *)&port_cfg->node_sym_name, (char *)driver_info->version,
BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
strncat((char *)&port_cfg->node_sym_name,
strlcat(port_cfg->node_sym_name.symname, (char *)driver_info->version,
BFA_SYMNAME_MAXLEN);
strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
BFA_SYMNAME_MAXLEN);

/* Host machine name */
strncat((char *)&port_cfg->node_sym_name,
(char *)driver_info->host_machine_name,
BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
strncat((char *)&port_cfg->node_sym_name,
strlcat(port_cfg->node_sym_name.symname,
driver_info->host_machine_name,
BFA_SYMNAME_MAXLEN);
strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
BFA_SYMNAME_MAXLEN);

/* null terminate */
port_cfg->node_sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
@@ -2642,10 +2642,10 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc,
hba_attr->fw_version);

strncpy(hba_attr->driver_version, (char *)driver_info->version,
strlcpy(hba_attr->driver_version, (char *)driver_info->version,
sizeof(hba_attr->driver_version));

strncpy(hba_attr->os_name, driver_info->host_os_name,
strlcpy(hba_attr->os_name, driver_info->host_os_name,
sizeof(hba_attr->os_name));

/*
@@ -2653,23 +2653,23 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
* to the os name along with a separator
*/
if (driver_info->host_os_patch[0] != '\0') {
strncat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
strncat(hba_attr->os_name, driver_info->host_os_patch,
sizeof(driver_info->host_os_patch));
strlcat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(hba_attr->os_name));
strlcat(hba_attr->os_name, driver_info->host_os_patch,
sizeof(hba_attr->os_name));
}

/* Retrieve the max frame size from the port attr */
bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size;

strncpy(hba_attr->node_sym_name.symname,
strlcpy(hba_attr->node_sym_name.symname,
port->port_cfg.node_sym_name.symname, BFA_SYMNAME_MAXLEN);
strcpy(hba_attr->vendor_info, "QLogic");
hba_attr->num_ports =
cpu_to_be32(bfa_ioc_get_nports(&port->fcs->bfa->ioc));
hba_attr->fabric_name = port->fabric->lps->pr_nwwn;
strncpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
strlcpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);

}

@@ -2736,20 +2736,20 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
/*
* OS device Name
*/
strncpy(port_attr->os_device_name, (char *)driver_info->os_device_name,
strlcpy(port_attr->os_device_name, driver_info->os_device_name,
sizeof(port_attr->os_device_name));

/*
* Host name
*/
strncpy(port_attr->host_name, (char *)driver_info->host_machine_name,
strlcpy(port_attr->host_name, driver_info->host_machine_name,
sizeof(port_attr->host_name));

port_attr->node_name = bfa_fcs_lport_get_nwwn(port);
port_attr->port_name = bfa_fcs_lport_get_pwwn(port);

strncpy(port_attr->port_sym_name.symname,
(char *)&bfa_fcs_lport_get_psym_name(port), BFA_SYMNAME_MAXLEN);
strlcpy(port_attr->port_sym_name.symname,
bfa_fcs_lport_get_psym_name(port).symname, BFA_SYMNAME_MAXLEN);
bfa_fcs_lport_get_attr(port, &lport_attr);
port_attr->port_type = cpu_to_be32(lport_attr.port_type);
port_attr->scos = pport_attr.cos_supported;
@@ -3229,7 +3229,7 @@ bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
rsp_str[gmal_entry->len-1] = 0;

/* copy IP Address to fabric */
strncpy(bfa_fcs_lport_get_fabric_ipaddr(port),
strlcpy(bfa_fcs_lport_get_fabric_ipaddr(port),
gmal_entry->ip_addr,
BFA_FCS_FABRIC_IPADDR_SZ);
break;
@@ -4667,21 +4667,13 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
* to that of the base port.
*/

strncpy((char *)psymbl,
(char *) &
(bfa_fcs_lport_get_psym_name
strlcpy(symbl,
(char *)&(bfa_fcs_lport_get_psym_name
(bfa_fcs_get_base_port(port->fcs))),
strlen((char *) &
bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
(port->fcs))));
sizeof(symbl));

/* Ensure we have a null terminating string. */
((char *)psymbl)[strlen((char *) &
bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
(port->fcs)))] = 0;
strncat((char *)psymbl,
(char *) &(bfa_fcs_lport_get_psym_name(port)),
strlen((char *) &bfa_fcs_lport_get_psym_name(port)));
strlcat(symbl, (char *)&(bfa_fcs_lport_get_psym_name(port)),
sizeof(symbl));
} else {
psymbl = (u8 *) &(bfa_fcs_lport_get_psym_name(port));
}
@@ -5173,7 +5165,6 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
struct fchs_s fchs;
struct bfa_fcxp_s *fcxp;
u8 symbl[256];
u8 *psymbl = &symbl[0];
int len;

/* Avoid sending RSPN in the following states. */
@@ -5203,22 +5194,17 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
* For Vports, we append the vport's port symbolic name
* to that of the base port.
*/
strncpy((char *)psymbl, (char *)&(bfa_fcs_lport_get_psym_name
strlcpy(symbl, (char *)&(bfa_fcs_lport_get_psym_name
(bfa_fcs_get_base_port(port->fcs))),
strlen((char *)&bfa_fcs_lport_get_psym_name(
bfa_fcs_get_base_port(port->fcs))));
sizeof(symbl));

/* Ensure we have a null terminating string. */
((char *)psymbl)[strlen((char *)&bfa_fcs_lport_get_psym_name(
bfa_fcs_get_base_port(port->fcs)))] = 0;

strncat((char *)psymbl,
strlcat(symbl,
(char *)&(bfa_fcs_lport_get_psym_name(port)),
strlen((char *)&bfa_fcs_lport_get_psym_name(port)));
sizeof(symbl));
}

len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
bfa_fcs_lport_get_fcid(port), 0, psymbl);
bfa_fcs_lport_get_fcid(port), 0, symbl);

bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
@@ -1809,13 +1809,12 @@ static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
struct bfi_ioc_ctrl_req_s enable_req;
struct timeval tv;

bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
bfa_ioc_portid(ioc));
enable_req.clscode = cpu_to_be16(ioc->clscode);
do_gettimeofday(&tv);
enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
/* unsigned 32-bit time_t overflow in y2106 */
enable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

@@ -1826,6 +1825,9 @@ bfa_ioc_send_disable(struct bfa_ioc_s *ioc)

bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
bfa_ioc_portid(ioc));
disable_req.clscode = cpu_to_be16(ioc->clscode);
/* unsigned 32-bit time_t overflow in y2106 */
disable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

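do_gettimeofday() is removed here because its struct timeval carries a 32-bit seconds field on 32-bit architectures; ktime_get_real_seconds() returns a time64_t directly. A sketch of the replacement pattern (the y2106 comments in the hunk refer to the firmware's wire format still being an unsigned 32-bit field):

#include <linux/timekeeping.h>

static u32 demo_wire_seconds(void)
{
	time64_t now = ktime_get_real_seconds();	/* 64-bit wall clock */

	/* The device-facing field is still 32 bits wide, so the value
	 * truncates in 2106 for the unsigned interpretation -- a
	 * protocol limit, no longer a kernel one. */
	return (u32)now;
}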
@@ -2803,7 +2805,7 @@ void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
strlcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

void
@@ -96,14 +96,11 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
port->stats_busy = BFA_FALSE;

if (status == BFA_STATUS_OK) {
struct timeval tv;

memcpy(port->stats, port->stats_dma.kva,
sizeof(union bfa_port_stats_u));
bfa_port_stats_swap(port, port->stats);

do_gettimeofday(&tv);
port->stats->fc.secs_reset = tv.tv_sec - port->stats_reset_time;
port->stats->fc.secs_reset = ktime_get_seconds() - port->stats_reset_time;
}

if (port->stats_cbfn) {
@@ -124,16 +121,13 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
static void
bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
{
struct timeval tv;

port->stats_status = status;
port->stats_busy = BFA_FALSE;

/*
* re-initialize time stamp for stats reset
*/
do_gettimeofday(&tv);
port->stats_reset_time = tv.tv_sec;
port->stats_reset_time = ktime_get_seconds();

if (port->stats_cbfn) {
port->stats_cbfn(port->stats_cbarg, status);
@@ -471,8 +465,6 @@ void
bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
void *dev, struct bfa_trc_mod_s *trcmod)
{
struct timeval tv;

WARN_ON(!port);

port->dev = dev;
@@ -494,8 +486,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
/*
* initialize time stamp for stats reset
*/
do_gettimeofday(&tv);
port->stats_reset_time = tv.tv_sec;
port->stats_reset_time = ktime_get_seconds();

bfa_trc(port, 0);
}
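Note the deliberate choice of clock in these stats paths: ktime_get_seconds() is monotonic, which is what you want for elapsed intervals, while the event-log conversions elsewhere in this series use ktime_get_real_seconds() for human-readable wall-clock stamps. Sketch of the elapsed-interval pattern:

#include <linux/timekeeping.h>

static time64_t reset_time;

static void demo_reset_stats(void)
{
	reset_time = ktime_get_seconds();	/* monotonic: immune to settimeofday() */
}

static time64_t demo_secs_since_reset(void)
{
	return ktime_get_seconds() - reset_time;
}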
@@ -36,7 +36,7 @@ struct bfa_port_s {
bfa_port_stats_cbfn_t stats_cbfn;
void *stats_cbarg;
bfa_status_t stats_status;
u32 stats_reset_time;
time64_t stats_reset_time;
union bfa_port_stats_u *stats;
struct bfa_dma_s stats_dma;
bfa_boolean_t endis_pending;
@@ -288,18 +288,6 @@ plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
return 0;
}

static u64
bfa_get_log_time(void)
{
u64 system_time = 0;
struct timeval tv;
do_gettimeofday(&tv);

/* We are interested in seconds only. */
system_time = tv.tv_sec;
return system_time;
}

static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
@@ -320,7 +308,7 @@ bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)

memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));

pl_recp->tv = bfa_get_log_time();
pl_recp->tv = ktime_get_real_seconds();
BFA_PL_LOG_REC_INCR(plog->tail);

if (plog->head == plog->tail)
@@ -350,8 +338,8 @@ bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
lp.eid = event;
lp.log_type = BFA_PL_LOG_TYPE_STRING;
lp.misc = misc;
strncpy(lp.log_entry.string_log, log_str,
BFA_PL_STRING_LOG_SZ - 1);
strlcpy(lp.log_entry.string_log, log_str,
BFA_PL_STRING_LOG_SZ);
lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
bfa_plog_add(plog, &lp);
}
@@ -3047,7 +3035,6 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
struct bfa_fcport_ln_s *ln = &fcport->ln;
struct timeval tv;

fcport->bfa = bfa;
ln->fcport = fcport;
@@ -3060,8 +3047,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
/*
* initialize time stamp for stats reset
*/
do_gettimeofday(&tv);
fcport->stats_reset_time = tv.tv_sec;
fcport->stats_reset_time = ktime_get_seconds();
fcport->stats_dma_ready = BFA_FALSE;

/*
@@ -3295,9 +3281,7 @@ __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
union bfa_fcport_stats_u *ret;

if (complete) {
struct timeval tv;
if (fcport->stats_status == BFA_STATUS_OK)
do_gettimeofday(&tv);
time64_t time = ktime_get_seconds();

list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
bfa_q_deq(&fcport->stats_pending_q, &qe);
@@ -3312,7 +3296,7 @@ __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
bfa_fcport_fcoe_stats_swap(&ret->fcoe,
&fcport->stats->fcoe);
ret->fcoe.secs_reset =
tv.tv_sec - fcport->stats_reset_time;
time - fcport->stats_reset_time;
}
}
bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
@@ -3373,13 +3357,10 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
struct list_head *qe, *qen;

if (complete) {
struct timeval tv;

/*
* re-initialize time stamp for stats reset
*/
do_gettimeofday(&tv);
fcport->stats_reset_time = tv.tv_sec;
fcport->stats_reset_time = ktime_get_seconds();
list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
bfa_q_deq(&fcport->statsclr_pending_q, &qe);
cb = (struct bfa_cb_pending_q_s *)qe;
@@ -6148,13 +6129,13 @@ bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
/*
* D-port
*/
#define bfa_dport_result_start(__dport, __mode) do { \
(__dport)->result.start_time = bfa_get_log_time(); \
(__dport)->result.status = DPORT_TEST_ST_INPRG; \
(__dport)->result.mode = (__mode); \
(__dport)->result.rp_pwwn = (__dport)->rp_pwwn; \
(__dport)->result.rp_nwwn = (__dport)->rp_nwwn; \
(__dport)->result.lpcnt = (__dport)->lpcnt; \
#define bfa_dport_result_start(__dport, __mode) do { \
(__dport)->result.start_time = ktime_get_real_seconds(); \
(__dport)->result.status = DPORT_TEST_ST_INPRG; \
(__dport)->result.mode = (__mode); \
(__dport)->result.rp_pwwn = (__dport)->rp_pwwn; \
(__dport)->result.rp_nwwn = (__dport)->rp_nwwn; \
(__dport)->result.lpcnt = (__dport)->lpcnt; \
} while (0)

static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
@@ -6588,7 +6569,7 @@ bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)

switch (dport->i2hmsg.scn.state) {
case BFI_DPORT_SCN_TESTCOMP:
dport->result.end_time = bfa_get_log_time();
dport->result.end_time = ktime_get_real_seconds();
bfa_trc(dport->bfa, dport->result.end_time);

dport->result.status = msg->info.testcomp.status;
@@ -6635,7 +6616,7 @@ bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
case BFI_DPORT_SCN_SUBTESTSTART:
subtesttype = msg->info.teststart.type;
dport->result.subtest[subtesttype].start_time =
bfa_get_log_time();
ktime_get_real_seconds();
dport->result.subtest[subtesttype].status =
DPORT_TEST_ST_INPRG;

@ -505,7 +505,7 @@ struct bfa_fcport_s {
struct list_head stats_pending_q;
struct list_head statsclr_pending_q;
bfa_boolean_t stats_qfull;
u32 stats_reset_time; /* stats reset time stamp */
time64_t stats_reset_time; /* stats reset time stamp */
bfa_boolean_t diag_busy; /* diag busy status */
bfa_boolean_t beacon; /* port beacon status */
bfa_boolean_t link_e2e_beacon; /* link beacon status */
@ -610,13 +610,12 @@ bfad_hal_mem_alloc(struct bfad_s *bfad)
/* Iterate through the KVA meminfo queue */
list_for_each(km_qe, &kva_info->qe) {
kva_elem = (struct bfa_mem_kva_s *) km_qe;
kva_elem->kva = vmalloc(kva_elem->mem_len);
kva_elem->kva = vzalloc(kva_elem->mem_len);
if (kva_elem->kva == NULL) {
bfad_hal_mem_release(bfad);
rc = BFA_STATUS_ENOMEM;
goto ext;
}
memset(kva_elem->kva, 0, kva_elem->mem_len);
}

/* Iterate through the DMA meminfo queue */
@ -981,20 +980,20 @@ bfad_start_ops(struct bfad_s *bfad) {

/* Fill the driver_info info to fcs*/
memset(&driver_info, 0, sizeof(driver_info));
strncpy(driver_info.version, BFAD_DRIVER_VERSION,
sizeof(driver_info.version) - 1);
strlcpy(driver_info.version, BFAD_DRIVER_VERSION,
sizeof(driver_info.version));
if (host_name)
strncpy(driver_info.host_machine_name, host_name,
sizeof(driver_info.host_machine_name) - 1);
strlcpy(driver_info.host_machine_name, host_name,
sizeof(driver_info.host_machine_name));
if (os_name)
strncpy(driver_info.host_os_name, os_name,
sizeof(driver_info.host_os_name) - 1);
strlcpy(driver_info.host_os_name, os_name,
sizeof(driver_info.host_os_name));
if (os_patch)
strncpy(driver_info.host_os_patch, os_patch,
sizeof(driver_info.host_os_patch) - 1);
strlcpy(driver_info.host_os_patch, os_patch,
sizeof(driver_info.host_os_patch));

strncpy(driver_info.os_device_name, bfad->pci_name,
sizeof(driver_info.os_device_name) - 1);
strlcpy(driver_info.os_device_name, bfad->pci_name,
sizeof(driver_info.os_device_name));

/* FCS driver info init */
spin_lock_irqsave(&bfad->bfad_lock, flags);
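All of the strncpy() to strlcpy() conversions in bfad.c close the same hole: strncpy(dst, src, n - 1) does not NUL-terminate when src is n - 1 bytes or longer, and it also zero-pads the entire remainder of dst on every call. strlcpy() always terminates and returns the length it tried to produce, so truncation is detectable. A sketch with placeholder names:

    #include <linux/string.h>

    /* Returns false when src had to be truncated to fit dst */
    static bool copy_field(char *dst, size_t dst_sz, const char *src)
    {
            /* strlcpy() NUL-terminates dst even on truncation */
            return strlcpy(dst, src, dst_sz) < dst_sz;
    }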
@ -487,7 +487,6 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) vport->drv_port.im_port;
struct bfad_s *bfad = im_port->bfad;
struct bfad_port_s *port;
struct bfa_fcs_vport_s *fcs_vport;
struct Scsi_Host *vshost;
wwn_t pwwn;
@ -502,8 +501,6 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
return 0;
}

port = im_port->port;

vshost = vport->drv_port.im_port->shost;
u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);

@ -843,7 +840,7 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
char symname[BFA_SYMNAME_MAXLEN];

bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
strncpy(symname, port_attr.port_cfg.sym_name.symname,
strlcpy(symname, port_attr.port_cfg.sym_name.symname,
BFA_SYMNAME_MAXLEN);
return snprintf(buf, PAGE_SIZE, "%s\n", symname);
}
@ -127,7 +127,7 @@ bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)

/* fill in driver attr info */
strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
strlcpy(iocmd->ioc_attr.driver_attr.driver_ver,
BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
iocmd->ioc_attr.adapter_attr.fw_ver);
@ -315,9 +315,9 @@ bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
iocmd->attr.port_type = port_attr.port_type;
iocmd->attr.loopback = port_attr.loopback;
iocmd->attr.authfail = port_attr.authfail;
strncpy(iocmd->attr.port_symname.symname,
strlcpy(iocmd->attr.port_symname.symname,
port_attr.port_cfg.sym_name.symname,
sizeof(port_attr.port_cfg.sym_name.symname));
sizeof(iocmd->attr.port_symname.symname));

iocmd->status = BFA_STATUS_OK;
return 0;
@ -2094,13 +2094,11 @@ bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
struct bfa_bsg_fcpim_profile_s *iocmd =
(struct bfa_bsg_fcpim_profile_s *)cmd;
struct timeval tv;
unsigned long flags;

do_gettimeofday(&tv);
spin_lock_irqsave(&bfad->bfad_lock, flags);
if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, ktime_get_real_seconds());
else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@ -81,7 +81,7 @@ bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file)

fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);

fw_debug->debug_buffer = vmalloc(fw_debug->buffer_len);
fw_debug->debug_buffer = vzalloc(fw_debug->buffer_len);
if (!fw_debug->debug_buffer) {
kfree(fw_debug);
printk(KERN_INFO "bfad[%d]: Failed to allocate fwtrc buffer\n",
@ -89,8 +89,6 @@ bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
return -ENOMEM;
}

memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);

spin_lock_irqsave(&bfad->bfad_lock, flags);
rc = bfa_ioc_debug_fwtrc(&bfad->bfa.ioc,
fw_debug->debug_buffer,
@ -125,7 +123,7 @@ bfad_debugfs_open_fwsave(struct inode *inode, struct file *file)

fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);

fw_debug->debug_buffer = vmalloc(fw_debug->buffer_len);
fw_debug->debug_buffer = vzalloc(fw_debug->buffer_len);
if (!fw_debug->debug_buffer) {
kfree(fw_debug);
printk(KERN_INFO "bfad[%d]: Failed to allocate fwsave buffer\n",
@ -133,8 +131,6 @@ bfad_debugfs_open_fwsave(struct inode *inode, struct file *file)
return -ENOMEM;
}

memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);

spin_lock_irqsave(&bfad->bfad_lock, flags);
rc = bfa_ioc_debug_fwsave(&bfad->bfa.ioc,
fw_debug->debug_buffer,
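vzalloc() is vmalloc() plus zeroing, which is why both hunks above can drop the follow-up memset(). The equivalence, with hypothetical buf and len:

    buf = vmalloc(len);
    if (buf)
            memset(buf, 0, len);

    /* collapses to */
    buf = vzalloc(len);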
@ -141,16 +141,28 @@ struct bfad_im_s {
} while (0)

/* post fc_host vendor event */
#define bfad_im_post_vendor_event(_entry, _drv, _cnt, _cat, _evt) do { \
do_gettimeofday(&(_entry)->aen_tv); \
(_entry)->bfad_num = (_drv)->inst_no; \
(_entry)->seq_num = (_cnt); \
(_entry)->aen_category = (_cat); \
(_entry)->aen_type = (_evt); \
if ((_drv)->bfad_flags & BFAD_FC4_PROBE_DONE) \
queue_work((_drv)->im->drv_workq, \
&(_drv)->im->aen_im_notify_work); \
} while (0)
static inline void bfad_im_post_vendor_event(struct bfa_aen_entry_s *entry,
struct bfad_s *drv, int cnt,
enum bfa_aen_category cat,
enum bfa_ioc_aen_event evt)
{
struct timespec64 ts;

ktime_get_real_ts64(&ts);
/*
 * 'unsigned long aen_tv_sec' overflows in y2106 on 32-bit
 * architectures, or in 2038 if user space interprets it
 * as 'signed'.
 */
entry->aen_tv_sec = ts.tv_sec;
entry->aen_tv_usec = ts.tv_nsec / NSEC_PER_USEC;
entry->bfad_num = drv->inst_no;
entry->seq_num = cnt;
entry->aen_category = cat;
entry->aen_type = evt;
if (drv->bfad_flags & BFAD_FC4_PROBE_DONE)
queue_work(drv->im->drv_workq, &drv->im->aen_im_notify_work);
}

struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port,
struct bfad_s *);
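Converting bfad_im_post_vendor_event() from a macro to a static inline buys type checking and single evaluation of every argument, and the switch to ktime_get_real_ts64() gives a timestamp whose seconds field is 64-bit on all architectures; as the new comment notes, the overflow merely moves if the stored aen_tv_sec field itself stays narrow. A sketch of the macro hazard the rewrite avoids (hypothetical fields):

    /* (n) is expanded textually at each use; post_evt(e, cnt++) misbehaves */
    #define post_evt(e, n)  do { (e)->seq_num = (n); (e)->last_seq = (n); } while (0)

    static inline void post_evt_fn(struct bfa_aen_entry_s *e, int n)
    {
            e->seq_num = n;         /* n evaluated exactly once, type-checked */
    }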
@ -1552,7 +1552,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,

rc = bnx2fc_shost_config(lport, parent);
if (rc) {
printk(KERN_ERR PFX "Couldnt configure shost for %s\n",
printk(KERN_ERR PFX "Couldn't configure shost for %s\n",
interface->netdev->name);
goto lp_config_err;
}
@ -1560,7 +1560,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
/* Initialize the libfc library */
rc = bnx2fc_libfc_config(lport);
if (rc) {
printk(KERN_ERR PFX "Couldnt configure libfc\n");
printk(KERN_ERR PFX "Couldn't configure libfc\n");
goto shost_err;
}
fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
@ -1857,16 +1857,15 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
 * entries. Hence the limit with one page is 8192 task context
 * entries.
 */
hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
PAGE_SIZE,
&hba->task_ctx_bd_dma,
GFP_KERNEL);
hba->task_ctx_bd_tbl = dma_zalloc_coherent(&hba->pcidev->dev,
PAGE_SIZE,
&hba->task_ctx_bd_dma,
GFP_KERNEL);
if (!hba->task_ctx_bd_tbl) {
printk(KERN_ERR PFX "unable to allocate task context BDT\n");
rc = -1;
goto out;
}
memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);

/*
 * Allocate task_ctx which is an array of pointers pointing to
@ -1895,16 +1894,15 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
for (i = 0; i < task_ctx_arr_sz; i++) {

hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
PAGE_SIZE,
&hba->task_ctx_dma[i],
GFP_KERNEL);
hba->task_ctx[i] = dma_zalloc_coherent(&hba->pcidev->dev,
PAGE_SIZE,
&hba->task_ctx_dma[i],
GFP_KERNEL);
if (!hba->task_ctx[i]) {
printk(KERN_ERR PFX "unable to alloc task context\n");
rc = -1;
goto out3;
}
memset(hba->task_ctx[i], 0, PAGE_SIZE);
addr = (u64)hba->task_ctx_dma[i];
task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
task_ctx_bdt->lo = cpu_to_le32((u32)addr);
@ -2033,28 +2031,23 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
}

for (i = 0; i < segment_count; ++i) {
hba->hash_tbl_segments[i] =
dma_alloc_coherent(&hba->pcidev->dev,
BNX2FC_HASH_TBL_CHUNK_SIZE,
&dma_segment_array[i],
GFP_KERNEL);
hba->hash_tbl_segments[i] = dma_zalloc_coherent(&hba->pcidev->dev,
BNX2FC_HASH_TBL_CHUNK_SIZE,
&dma_segment_array[i],
GFP_KERNEL);
if (!hba->hash_tbl_segments[i]) {
printk(KERN_ERR PFX "hash segment alloc failed\n");
goto cleanup_dma;
}
memset(hba->hash_tbl_segments[i], 0,
BNX2FC_HASH_TBL_CHUNK_SIZE);
}

hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
PAGE_SIZE,
&hba->hash_tbl_pbl_dma,
GFP_KERNEL);
hba->hash_tbl_pbl = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
&hba->hash_tbl_pbl_dma,
GFP_KERNEL);
if (!hba->hash_tbl_pbl) {
printk(KERN_ERR PFX "hash table pbl alloc failed\n");
goto cleanup_dma;
}
memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);

pbl = hba->hash_tbl_pbl;
for (i = 0; i < segment_count; ++i) {
@ -2111,27 +2104,26 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
return -ENOMEM;

mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
&hba->t2_hash_tbl_ptr_dma,
GFP_KERNEL);
hba->t2_hash_tbl_ptr = dma_zalloc_coherent(&hba->pcidev->dev,
mem_size,
&hba->t2_hash_tbl_ptr_dma,
GFP_KERNEL);
if (!hba->t2_hash_tbl_ptr) {
printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
bnx2fc_free_fw_resc(hba);
return -ENOMEM;
}
memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);

mem_size = BNX2FC_NUM_MAX_SESS *
sizeof(struct fcoe_t2_hash_table_entry);
hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
&hba->t2_hash_tbl_dma,
GFP_KERNEL);
hba->t2_hash_tbl = dma_zalloc_coherent(&hba->pcidev->dev, mem_size,
&hba->t2_hash_tbl_dma,
GFP_KERNEL);
if (!hba->t2_hash_tbl) {
printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
bnx2fc_free_fw_resc(hba);
return -ENOMEM;
}
memset(hba->t2_hash_tbl, 0x00, mem_size);
for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
addr = (unsigned long) hba->t2_hash_tbl_dma +
((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
@ -2148,16 +2140,14 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
return -ENOMEM;
}

hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
PAGE_SIZE,
&hba->stats_buf_dma,
GFP_KERNEL);
hba->stats_buffer = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
&hba->stats_buf_dma,
GFP_KERNEL);
if (!hba->stats_buffer) {
printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
bnx2fc_free_fw_resc(hba);
return -ENOMEM;
}
memset(hba->stats_buffer, 0x00, PAGE_SIZE);

return 0;
}
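Every hunk in this file and the next is the same mechanical rewrite: dma_alloc_coherent() followed by memset() becomes dma_zalloc_coherent(), which (as the API stood at the time of this series) returns zeroed memory itself. The shape of the change, with placeholder names:

    p = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
    if (!p)
            goto fail;
    memset(p, 0, size);

    /* becomes */
    p = dma_zalloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
    if (!p)
            goto fail;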
@ -672,56 +672,52 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;

tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
&tgt->sq_dma, GFP_KERNEL);
tgt->sq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
&tgt->sq_dma, GFP_KERNEL);
if (!tgt->sq) {
printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
tgt->sq_mem_size);
goto mem_alloc_failure;
}
memset(tgt->sq, 0, tgt->sq_mem_size);

/* Allocate and map CQ */
tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;

tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
&tgt->cq_dma, GFP_KERNEL);
tgt->cq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
&tgt->cq_dma, GFP_KERNEL);
if (!tgt->cq) {
printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
tgt->cq_mem_size);
goto mem_alloc_failure;
}
memset(tgt->cq, 0, tgt->cq_mem_size);

/* Allocate and map RQ and RQ PBL */
tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;

tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
&tgt->rq_dma, GFP_KERNEL);
tgt->rq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
&tgt->rq_dma, GFP_KERNEL);
if (!tgt->rq) {
printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
tgt->rq_mem_size);
goto mem_alloc_failure;
}
memset(tgt->rq, 0, tgt->rq_mem_size);

tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;

tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
&tgt->rq_pbl_dma, GFP_KERNEL);
tgt->rq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
&tgt->rq_pbl_dma, GFP_KERNEL);
if (!tgt->rq_pbl) {
printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
tgt->rq_pbl_size);
goto mem_alloc_failure;
}

memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
page = tgt->rq_dma;
pbl = (u32 *)tgt->rq_pbl;
@ -739,44 +735,43 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;

tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
&tgt->xferq_dma, GFP_KERNEL);
tgt->xferq = dma_zalloc_coherent(&hba->pcidev->dev,
tgt->xferq_mem_size, &tgt->xferq_dma,
GFP_KERNEL);
if (!tgt->xferq) {
printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
tgt->xferq_mem_size);
goto mem_alloc_failure;
}
memset(tgt->xferq, 0, tgt->xferq_mem_size);

/* Allocate and map CONFQ & CONFQ PBL */
tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;

tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
&tgt->confq_dma, GFP_KERNEL);
tgt->confq = dma_zalloc_coherent(&hba->pcidev->dev,
tgt->confq_mem_size, &tgt->confq_dma,
GFP_KERNEL);
if (!tgt->confq) {
printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
tgt->confq_mem_size);
goto mem_alloc_failure;
}
memset(tgt->confq, 0, tgt->confq_mem_size);

tgt->confq_pbl_size =
(tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
tgt->confq_pbl_size =
(tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;

tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
tgt->confq_pbl_size,
&tgt->confq_pbl_dma, GFP_KERNEL);
tgt->confq_pbl = dma_zalloc_coherent(&hba->pcidev->dev,
tgt->confq_pbl_size,
&tgt->confq_pbl_dma, GFP_KERNEL);
if (!tgt->confq_pbl) {
printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
tgt->confq_pbl_size);
goto mem_alloc_failure;
}

memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
page = tgt->confq_dma;
pbl = (u32 *)tgt->confq_pbl;
@ -792,15 +787,14 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map ConnDB */
tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);

tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
tgt->conn_db_mem_size,
&tgt->conn_db_dma, GFP_KERNEL);
tgt->conn_db = dma_zalloc_coherent(&hba->pcidev->dev,
tgt->conn_db_mem_size,
&tgt->conn_db_dma, GFP_KERNEL);
if (!tgt->conn_db) {
printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
tgt->conn_db_mem_size);
goto mem_alloc_failure;
}
memset(tgt->conn_db, 0, tgt->conn_db_mem_size);

/* Allocate and map LCQ */
@ -808,15 +802,14 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;

tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
&tgt->lcq_dma, GFP_KERNEL);
tgt->lcq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
&tgt->lcq_dma, GFP_KERNEL);

if (!tgt->lcq) {
printk(KERN_ERR PFX "unable to allocate lcq %d\n",
tgt->lcq_mem_size);
goto mem_alloc_failure;
}
memset(tgt->lcq, 0, tgt->lcq_mem_size);

tgt->conn_db->rq_prod = 0x8000;
@ -547,12 +547,9 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
memcpy(nopout_wqe->lun, &nopout_hdr->lun, 8);

if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
u32 tmp = nopout_wqe->lun[0];
/* 57710 requires LUN field to be swapped */
nopout_wqe->lun[0] = nopout_wqe->lun[1];
nopout_wqe->lun[1] = tmp;
}
/* 57710 requires LUN field to be swapped */
if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
swap(nopout_wqe->lun[0], nopout_wqe->lun[1]);

nopout_wqe->itt = ((u16)task->itt |
(ISCSI_TASK_TYPE_MPATH <<
@ -1073,15 +1070,14 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)

/* Allocate memory area for actual SQ element */
ep->qp.sq_virt =
dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
&ep->qp.sq_phys, GFP_KERNEL);
dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
&ep->qp.sq_phys, GFP_KERNEL);
if (!ep->qp.sq_virt) {
printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
ep->qp.sq_mem_size);
goto mem_alloc_err;
}

memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
ep->qp.sq_first_qe = ep->qp.sq_virt;
ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
@ -1110,14 +1106,13 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)

/* Allocate memory area for actual CQ element */
ep->qp.cq_virt =
dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
&ep->qp.cq_phys, GFP_KERNEL);
dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
&ep->qp.cq_phys, GFP_KERNEL);
if (!ep->qp.cq_virt) {
printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
ep->qp.cq_mem_size);
goto mem_alloc_err;
}
memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);

ep->qp.cq_first_qe = ep->qp.cq_virt;
ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
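The open-coded three-line exchange of the 57710 LUN words becomes the kernel's swap() helper, which in <linux/kernel.h> is essentially:

    #define swap(a, b) \
            do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

Besides being shorter, it removes the temporary from the surrounding scope and lets the comment sit directly on the condition it explains.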
@ -1258,7 +1258,7 @@ module_init(csio_init);
module_exit(csio_exit);
MODULE_AUTHOR(CSIO_DRV_AUTHOR);
MODULE_DESCRIPTION(CSIO_DRV_DESC);
MODULE_LICENSE(CSIO_DRV_LICENSE);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
MODULE_FIRMWARE(FW_FNAME_T5);
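Spelling out the license string here is presumably for the benefit of tooling that scans module sources for the MODULE_LICENSE("...") literal rather than running the preprocessor; the other MODULE_* macros can keep their defines since nothing greps for them the same way:

    MODULE_LICENSE("Dual BSD/GPL");     /* literal, not MODULE_LICENSE(CSIO_DRV_LICENSE) */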
@ -48,7 +48,6 @@
#include "csio_hw.h"

#define CSIO_DRV_AUTHOR "Chelsio Communications"
#define CSIO_DRV_LICENSE "Dual BSD/GPL"
#define CSIO_DRV_DESC "Chelsio FCoE driver"
#define CSIO_DRV_VERSION "1.0.0-ko"
@ -1216,7 +1216,7 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
/* Queue mbox cmd, if another mbox cmd is active */
if (mbp->mb_cbfn == NULL) {
rv = -EBUSY;
csio_dbg(hw, "Couldnt own Mailbox %x op:0x%x\n",
csio_dbg(hw, "Couldn't own Mailbox %x op:0x%x\n",
hw->pfn, *((uint8_t *)mbp->mb));

goto error_out;
@ -1244,14 +1244,14 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
rv = owner ? -EBUSY : -ETIMEDOUT;

csio_dbg(hw,
"Couldnt own Mailbox %x op:0x%x "
"Couldn't own Mailbox %x op:0x%x "
"owner:%x\n",
hw->pfn, *((uint8_t *)mbp->mb), owner);
goto error_out;
} else {
if (mbm->mcurrent == NULL) {
csio_err(hw,
"Couldnt own Mailbox %x "
"Couldn't own Mailbox %x "
"op:0x%x owner:%x\n",
hw->pfn, *((uint8_t *)mbp->mb),
owner);
@ -1914,7 +1914,7 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
if (task->sc) {
task->hdr = (struct iscsi_hdr *)tdata->skb->data;
} else {
task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_KERNEL);
task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_ATOMIC);
if (!task->hdr) {
__kfree_skb(tdata->skb);
tdata->skb = NULL;
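The flag change matters because cxgbi_conn_alloc_pdu() can apparently be reached in contexts that must not sleep, and GFP_KERNEL allocations may block. GFP_ATOMIC never sleeps but fails more readily, which is why the error path right after the allocation stays essential:

    /* may sleep: process context only, no spinlocks held */
    hdr = kzalloc(len, GFP_KERNEL);

    /* never sleeps: safe under locks or in softirq context, may fail */
    hdr = kzalloc(len, GFP_ATOMIC);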
@ -1,2 +1,2 @@
obj-$(CONFIG_CXLFLASH) += cxlflash.o
cxlflash-y += main.o superpipe.o lunmgt.o vlun.o
cxlflash-y += main.o superpipe.o lunmgt.o vlun.o cxl_hw.o
41 drivers/scsi/cxlflash/backend.h Normal file
@ -0,0 +1,41 @@
/*
 * CXL Flash Device Driver
 *
 * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *             Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2018 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

extern const struct cxlflash_backend_ops cxlflash_cxl_ops;

struct cxlflash_backend_ops {
struct module *module;
void __iomem * (*psa_map)(void *);
void (*psa_unmap)(void __iomem *);
int (*process_element)(void *);
int (*map_afu_irq)(void *, int, irq_handler_t, void *, char *);
void (*unmap_afu_irq)(void *, int, void *);
int (*start_context)(void *);
int (*stop_context)(void *);
int (*afu_reset)(void *);
void (*set_master)(void *);
void * (*get_context)(struct pci_dev *, void *);
void * (*dev_context_init)(struct pci_dev *, void *);
int (*release_context)(void *);
void (*perst_reloads_same_image)(void *, bool);
ssize_t (*read_adapter_vpd)(struct pci_dev *, void *, size_t);
int (*allocate_afu_irqs)(void *, int);
void (*free_afu_irqs)(void *);
void * (*create_afu)(struct pci_dev *);
struct file * (*get_fd)(void *, struct file_operations *, int *);
void * (*fops_get_context)(struct file *);
int (*start_work)(void *, u64);
int (*fd_mmap)(struct file *, struct vm_area_struct *);
int (*fd_release)(struct inode *, struct file *);
};
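backend.h converts every direct CXL call in cxlflash into a function-pointer hop through cxlflash_backend_ops, with opaque void * cookies in place of cxl_context and cxl_afu pointers, so an alternative transport can be plugged in by supplying another ops table. A hypothetical call site looks like:

    static void probe_mmio(struct cxlflash_cfg *cfg, struct hwq *hwq)
    {
            void __iomem *map = cfg->ops->psa_map(hwq->ctx_cookie);

            if (map)
                    cfg->ops->psa_unmap(map);
    }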
@ -25,6 +25,8 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

#include "backend.h"

extern const struct file_operations cxlflash_cxl_fops;

#define MAX_CONTEXT CXLFLASH_MAX_CONTEXT /* num contexts per afu */
@ -114,6 +116,7 @@ enum cxlflash_hwq_mode {
struct cxlflash_cfg {
struct afu *afu;

const struct cxlflash_backend_ops *ops;
struct pci_dev *dev;
struct pci_device_id *dev_id;
struct Scsi_Host *host;
@ -129,7 +132,7 @@ struct cxlflash_cfg {
int lr_port;
atomic_t scan_host_needed;

struct cxl_afu *cxl_afu;
void *afu_cookie;

atomic_t recovery_threads;
struct mutex ctx_recovery_mutex;
@ -203,8 +206,7 @@ struct hwq {
 * fields after this point
 */
struct afu *afu;
struct cxl_context *ctx;
struct cxl_ioctl_start_work work;
void *ctx_cookie;
struct sisl_host_map __iomem *host_map; /* MC host map */
struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
ctx_hndl_t ctx_hndl; /* master's context handle */
168 drivers/scsi/cxlflash/cxl_hw.c Normal file
@ -0,0 +1,168 @@
/*
 * CXL Flash Device Driver
 *
 * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *             Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2018 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <misc/cxl.h>

#include "backend.h"

/*
 * The following routines map the cxlflash backend operations to existing CXL
 * kernel API function and are largely simple shims that provide an abstraction
 * for converting generic context and AFU cookies into cxl_context or cxl_afu
 * pointers.
 */

static void __iomem *cxlflash_psa_map(void *ctx_cookie)
{
return cxl_psa_map(ctx_cookie);
}

static void cxlflash_psa_unmap(void __iomem *addr)
{
cxl_psa_unmap(addr);
}

static int cxlflash_process_element(void *ctx_cookie)
{
return cxl_process_element(ctx_cookie);
}

static int cxlflash_map_afu_irq(void *ctx_cookie, int num,
irq_handler_t handler, void *cookie, char *name)
{
return cxl_map_afu_irq(ctx_cookie, num, handler, cookie, name);
}

static void cxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
{
cxl_unmap_afu_irq(ctx_cookie, num, cookie);
}

static int cxlflash_start_context(void *ctx_cookie)
{
return cxl_start_context(ctx_cookie, 0, NULL);
}

static int cxlflash_stop_context(void *ctx_cookie)
{
return cxl_stop_context(ctx_cookie);
}

static int cxlflash_afu_reset(void *ctx_cookie)
{
return cxl_afu_reset(ctx_cookie);
}

static void cxlflash_set_master(void *ctx_cookie)
{
cxl_set_master(ctx_cookie);
}

static void *cxlflash_get_context(struct pci_dev *dev, void *afu_cookie)
{
return cxl_get_context(dev);
}

static void *cxlflash_dev_context_init(struct pci_dev *dev, void *afu_cookie)
{
return cxl_dev_context_init(dev);
}

static int cxlflash_release_context(void *ctx_cookie)
{
return cxl_release_context(ctx_cookie);
}

static void cxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
{
cxl_perst_reloads_same_image(afu_cookie, image);
}

static ssize_t cxlflash_read_adapter_vpd(struct pci_dev *dev,
void *buf, size_t count)
{
return cxl_read_adapter_vpd(dev, buf, count);
}

static int cxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
{
return cxl_allocate_afu_irqs(ctx_cookie, num);
}

static void cxlflash_free_afu_irqs(void *ctx_cookie)
{
cxl_free_afu_irqs(ctx_cookie);
}

static void *cxlflash_create_afu(struct pci_dev *dev)
{
return cxl_pci_to_afu(dev);
}

static struct file *cxlflash_get_fd(void *ctx_cookie,
struct file_operations *fops, int *fd)
{
return cxl_get_fd(ctx_cookie, fops, fd);
}

static void *cxlflash_fops_get_context(struct file *file)
{
return cxl_fops_get_context(file);
}

static int cxlflash_start_work(void *ctx_cookie, u64 irqs)
{
struct cxl_ioctl_start_work work = { 0 };

work.num_interrupts = irqs;
work.flags = CXL_START_WORK_NUM_IRQS;

return cxl_start_work(ctx_cookie, &work);
}

static int cxlflash_fd_mmap(struct file *file, struct vm_area_struct *vm)
{
return cxl_fd_mmap(file, vm);
}

static int cxlflash_fd_release(struct inode *inode, struct file *file)
{
return cxl_fd_release(inode, file);
}

const struct cxlflash_backend_ops cxlflash_cxl_ops = {
.module = THIS_MODULE,
.psa_map = cxlflash_psa_map,
.psa_unmap = cxlflash_psa_unmap,
.process_element = cxlflash_process_element,
.map_afu_irq = cxlflash_map_afu_irq,
.unmap_afu_irq = cxlflash_unmap_afu_irq,
.start_context = cxlflash_start_context,
.stop_context = cxlflash_stop_context,
.afu_reset = cxlflash_afu_reset,
.set_master = cxlflash_set_master,
.get_context = cxlflash_get_context,
.dev_context_init = cxlflash_dev_context_init,
.release_context = cxlflash_release_context,
.perst_reloads_same_image = cxlflash_perst_reloads_same_image,
.read_adapter_vpd = cxlflash_read_adapter_vpd,
.allocate_afu_irqs = cxlflash_allocate_afu_irqs,
.free_afu_irqs = cxlflash_free_afu_irqs,
.create_afu = cxlflash_create_afu,
.get_fd = cxlflash_get_fd,
.fops_get_context = cxlflash_fops_get_context,
.start_work = cxlflash_start_work,
.fd_mmap = cxlflash_fd_mmap,
.fd_release = cxlflash_fd_release,
};
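Each shim above is a one-line adapter from the cookie-based ops signature to the matching cxl_* API; only cxlflash_start_work() carries real logic, rebuilding the cxl_ioctl_start_work structure from a bare interrupt count so callers never see the CXL type. A second backend would follow the same shape (hypothetical OpenCAPI shims shown):

    const struct cxlflash_backend_ops cxlflash_ocxl_ops = {
            .module  = THIS_MODULE,
            .psa_map = ocxlflash_psa_map,       /* hypothetical shim */
            /* remaining hooks filled in the same way */
    };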
@ -620,6 +620,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
cmd->parent = afu;
cmd->hwq_index = hwq_index;

cmd->sa.ioasc = 0;
cmd->rcb.ctx_id = hwq->ctx_hndl;
cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
@ -710,7 +711,7 @@ static void stop_afu(struct cxlflash_cfg *cfg)
}

if (likely(afu->afu_map)) {
cxl_psa_unmap((void __iomem *)afu->afu_map);
cfg->ops->psa_unmap(afu->afu_map);
afu->afu_map = NULL;
}
}
@ -738,7 +739,7 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,

hwq = get_hwq(afu, index);

if (!hwq->ctx) {
if (!hwq->ctx_cookie) {
dev_err(dev, "%s: returning with NULL MC\n", __func__);
return;
}
@ -747,13 +748,13 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
case UNMAP_THREE:
/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
if (index == PRIMARY_HWQ)
cxl_unmap_afu_irq(hwq->ctx, 3, hwq);
cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
case UNMAP_TWO:
cxl_unmap_afu_irq(hwq->ctx, 2, hwq);
cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
case UNMAP_ONE:
cxl_unmap_afu_irq(hwq->ctx, 1, hwq);
cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
case FREE_IRQ:
cxl_free_afu_irqs(hwq->ctx);
cfg->ops->free_afu_irqs(hwq->ctx_cookie);
/* fall through */
case UNDO_NOOP:
/* No action required */
@ -782,15 +783,15 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index)

hwq = get_hwq(afu, index);

if (!hwq->ctx) {
if (!hwq->ctx_cookie) {
dev_err(dev, "%s: returning with NULL MC\n", __func__);
return;
}

WARN_ON(cxl_stop_context(hwq->ctx));
WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
if (index != PRIMARY_HWQ)
WARN_ON(cxl_release_context(hwq->ctx));
hwq->ctx = NULL;
WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
hwq->ctx_cookie = NULL;

spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
flush_pending_cmds(hwq);
@ -1597,27 +1598,6 @@ out:
return IRQ_HANDLED;
}

/**
 * start_context() - starts the master context
 * @cfg: Internal structure associated with the host.
 * @index: Index of the hardware queue.
 *
 * Return: A success or failure value from CXL services.
 */
static int start_context(struct cxlflash_cfg *cfg, u32 index)
{
struct device *dev = &cfg->dev->dev;
struct hwq *hwq = get_hwq(cfg->afu, index);
int rc = 0;

rc = cxl_start_context(hwq->ctx,
hwq->work.work_element_descriptor,
NULL);

dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
}

/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg: Internal structure associated with the host.
@ -1640,7 +1620,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };

/* Get the VPD data from the device */
vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
if (unlikely(vpd_size <= 0)) {
dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
__func__, vpd_size);
@ -1732,6 +1712,7 @@ static void init_pcr(struct cxlflash_cfg *cfg)
struct afu *afu = cfg->afu;
struct sisl_ctrl_map __iomem *ctrl_map;
struct hwq *hwq;
void *cookie;
int i;

for (i = 0; i < MAX_CONTEXT; i++) {
@ -1746,8 +1727,9 @@ static void init_pcr(struct cxlflash_cfg *cfg)
/* Copy frequently used fields into hwq */
for (i = 0; i < afu->num_hwqs; i++) {
hwq = get_hwq(afu, i);
cookie = hwq->ctx_cookie;

hwq->ctx_hndl = (u16) cxl_process_element(hwq->ctx);
hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;

@ -1925,13 +1907,13 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
struct hwq *hwq)
{
struct device *dev = &cfg->dev->dev;
struct cxl_context *ctx = hwq->ctx;
void *ctx = hwq->ctx_cookie;
int rc = 0;
enum undo_level level = UNDO_NOOP;
bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
int num_irqs = is_primary_hwq ? 3 : 2;

rc = cxl_allocate_afu_irqs(ctx, num_irqs);
rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
if (unlikely(rc)) {
dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
__func__, rc);
@ -1939,16 +1921,16 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
goto out;
}

rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
"SISL_MSI_SYNC_ERROR");
rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
"SISL_MSI_SYNC_ERROR");
if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
level = FREE_IRQ;
goto out;
}

rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
"SISL_MSI_RRQ_UPDATED");
rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
"SISL_MSI_RRQ_UPDATED");
if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
level = UNMAP_ONE;
@ -1959,8 +1941,8 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
if (!is_primary_hwq)
goto out;

rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
"SISL_MSI_ASYNC_ERROR");
rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
"SISL_MSI_ASYNC_ERROR");
if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
level = UNMAP_TWO;
@ -1979,7 +1961,7 @@ out:
 */
static int init_mc(struct cxlflash_cfg *cfg, u32 index)
{
struct cxl_context *ctx;
void *ctx;
struct device *dev = &cfg->dev->dev;
struct hwq *hwq = get_hwq(cfg->afu, index);
int rc = 0;
@ -1990,23 +1972,23 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
INIT_LIST_HEAD(&hwq->pending_cmds);

if (index == PRIMARY_HWQ)
ctx = cxl_get_context(cfg->dev);
ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
else
ctx = cxl_dev_context_init(cfg->dev);
if (unlikely(!ctx)) {
ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
if (IS_ERR_OR_NULL(ctx)) {
rc = -ENOMEM;
goto err1;
}

WARN_ON(hwq->ctx);
hwq->ctx = ctx;
WARN_ON(hwq->ctx_cookie);
hwq->ctx_cookie = ctx;

/* Set it up as a master with the CXL */
cxl_set_master(ctx);
cfg->ops->set_master(ctx);

/* Reset AFU when initializing primary context */
if (index == PRIMARY_HWQ) {
rc = cxl_afu_reset(ctx);
rc = cfg->ops->afu_reset(ctx);
if (unlikely(rc)) {
dev_err(dev, "%s: AFU reset failed rc=%d\n",
__func__, rc);
@ -2020,11 +2002,8 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
goto err2;
}

/* This performs the equivalent of the CXL_IOCTL_START_WORK.
 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
 * element (pe) that is embedded in the context (ctx)
 */
rc = start_context(cfg, index);
/* Finally, activate the context by starting it */
rc = cfg->ops->start_context(hwq->ctx_cookie);
if (unlikely(rc)) {
dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
level = UNMAP_THREE;
@ -2037,9 +2016,9 @@ out:
err2:
term_intr(cfg, level, index);
if (index != PRIMARY_HWQ)
cxl_release_context(ctx);
cfg->ops->release_context(ctx);
err1:
hwq->ctx = NULL;
hwq->ctx_cookie = NULL;
goto out;
}

@ -2094,7 +2073,7 @@ static int init_afu(struct cxlflash_cfg *cfg)
struct hwq *hwq;
int i;

cxl_perst_reloads_same_image(cfg->cxl_afu, true);
cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);

afu->num_hwqs = afu->desired_hwqs;
for (i = 0; i < afu->num_hwqs; i++) {
@ -2108,9 +2087,9 @@ static int init_afu(struct cxlflash_cfg *cfg)

/* Map the entire MMIO space of the AFU using the first context */
hwq = get_hwq(afu, PRIMARY_HWQ);
afu->afu_map = cxl_psa_map(hwq->ctx);
afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
if (!afu->afu_map) {
dev_err(dev, "%s: cxl_psa_map failed\n", __func__);
dev_err(dev, "%s: psa_map failed\n", __func__);
rc = -ENOMEM;
goto err1;
}
@ -3670,6 +3649,7 @@ static int cxlflash_probe(struct pci_dev *pdev,

cfg->init_state = INIT_STATE_NONE;
cfg->dev = pdev;
cfg->ops = &cxlflash_cxl_ops;
cfg->cxl_fops = cxlflash_cxl_fops;

/*
@ -3701,7 +3681,7 @@ static int cxlflash_probe(struct pci_dev *pdev,

pci_set_drvdata(pdev, cfg);

cfg->cxl_afu = cxl_pci_to_afu(pdev);
cfg->afu_cookie = cfg->ops->create_afu(pdev);

rc = init_pci(cfg);
if (rc) {
@ -810,20 +810,22 @@ err:
 * init_context() - initializes a previously allocated context
 * @ctxi: Previously allocated context
 * @cfg: Internal structure associated with the host.
 * @ctx: Previously obtained CXL context reference.
 * @ctx: Previously obtained context cookie.
 * @ctxid: Previously obtained process element associated with CXL context.
 * @file: Previously obtained file associated with CXL context.
 * @perms: User-specified permissions.
 * @irqs: User-specified number of interrupts.
 */
static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
struct cxl_context *ctx, int ctxid, struct file *file,
u32 perms)
void *ctx, int ctxid, struct file *file, u32 perms,
u64 irqs)
{
struct afu *afu = cfg->afu;

ctxi->rht_perms = perms;
ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
ctxi->irqs = irqs;
ctxi->pid = task_tgid_nr(current); /* tgid = pid */
ctxi->ctx = ctx;
ctxi->cfg = cfg;
@ -976,9 +978,9 @@ static int cxlflash_disk_detach(struct scsi_device *sdev,
 */
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
{
struct cxl_context *ctx = cxl_fops_get_context(file);
struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
cxl_fops);
void *ctx = cfg->ops->fops_get_context(file);
struct device *dev = &cfg->dev->dev;
struct ctx_info *ctxi = NULL;
struct dk_cxlflash_detach detach = { { 0 }, 0 };
@ -986,7 +988,7 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
int ctxid;

ctxid = cxl_process_element(ctx);
ctxid = cfg->ops->process_element(ctx);
if (unlikely(ctxid < 0)) {
dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid);
@ -1014,7 +1016,7 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
out_release:
cxl_fd_release(inode, file);
cfg->ops->fd_release(inode, file);
out:
dev_dbg(dev, "%s: returning\n", __func__);
return 0;
@ -1089,9 +1091,9 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct file *file = vma->vm_file;
struct cxl_context *ctx = cxl_fops_get_context(file);
struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
cxl_fops);
void *ctx = cfg->ops->fops_get_context(file);
struct device *dev = &cfg->dev->dev;
struct ctx_info *ctxi = NULL;
struct page *err_page = NULL;
@ -1099,7 +1101,7 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
int rc = 0;
int ctxid;

ctxid = cxl_process_element(ctx);
ctxid = cfg->ops->process_element(ctx);
if (unlikely(ctxid < 0)) {
dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid);
@ -1162,16 +1164,16 @@ static const struct vm_operations_struct cxlflash_mmap_vmops = {
 */
static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
{
struct cxl_context *ctx = cxl_fops_get_context(file);
struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
cxl_fops);
void *ctx = cfg->ops->fops_get_context(file);
struct device *dev = &cfg->dev->dev;
struct ctx_info *ctxi = NULL;
enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
int ctxid;
int rc = 0;

ctxid = cxl_process_element(ctx);
ctxid = cfg->ops->process_element(ctx);
if (unlikely(ctxid < 0)) {
dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid);
@ -1188,7 +1190,7 @@ static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)

dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);

rc = cxl_fd_mmap(file, vma);
rc = cfg->ops->fd_mmap(file, vma);
if (likely(!rc)) {
/* Insert ourself in the mmap fault handler path */
ctxi->cxl_mmap_vmops = vma->vm_ops;
@ -1307,23 +1309,23 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
struct afu *afu = cfg->afu;
struct llun_info *lli = sdev->hostdata;
struct glun_info *gli = lli->parent;
struct cxl_ioctl_start_work *work;
struct ctx_info *ctxi = NULL;
struct lun_access *lun_access = NULL;
int rc = 0;
u32 perms;
int ctxid = -1;
u64 irqs = attach->num_interrupts;
u64 flags = 0UL;
u64 rctxid = 0UL;
struct file *file = NULL;

struct cxl_context *ctx = NULL;
void *ctx = NULL;

int fd = -1;

if (attach->num_interrupts > 4) {
if (irqs > 4) {
dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
__func__, attach->num_interrupts);
__func__, irqs);
rc = -EINVAL;
goto out;
}
@ -1394,7 +1396,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
goto err;
}

ctx = cxl_dev_context_init(cfg->dev);
ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
if (IS_ERR_OR_NULL(ctx)) {
dev_err(dev, "%s: Could not initialize context %p\n",
__func__, ctx);
@ -1402,25 +1404,21 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
goto err;
}

work = &ctxi->work;
work->num_interrupts = attach->num_interrupts;
work->flags = CXL_START_WORK_NUM_IRQS;

rc = cxl_start_work(ctx, work);
rc = cfg->ops->start_work(ctx, irqs);
if (unlikely(rc)) {
dev_dbg(dev, "%s: Could not start context rc=%d\n",
__func__, rc);
goto err;
}

ctxid = cxl_process_element(ctx);
ctxid = cfg->ops->process_element(ctx);
if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
rc = -EPERM;
goto err;
}

file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
if (unlikely(fd < 0)) {
rc = -ENODEV;
dev_err(dev, "%s: Could not get file descriptor\n", __func__);
@ -1431,7 +1429,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
perms = SISL_RHT_PERM(attach->hdr.flags + 1);

/* Context mutex is locked upon return */
init_context(ctxi, cfg, ctx, ctxid, file, perms);
init_context(ctxi, cfg, ctx, ctxid, file, perms, irqs);

rc = afu_attach(cfg, ctxi);
if (unlikely(rc)) {
@ -1479,8 +1477,8 @@ out:
err:
/* Cleanup CXL context; okay to 'stop' even if it was not started */
if (!IS_ERR_OR_NULL(ctx)) {
cxl_stop_context(ctx);
cxl_release_context(ctx);
cfg->ops->stop_context(ctx);
cfg->ops->release_context(ctx);
ctx = NULL;
}

@ -1529,10 +1527,10 @@ static int recover_context(struct cxlflash_cfg *cfg,
int fd = -1;
int ctxid = -1;
struct file *file;
struct cxl_context *ctx;
void *ctx;
struct afu *afu = cfg->afu;

ctx = cxl_dev_context_init(cfg->dev);
ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
if (IS_ERR_OR_NULL(ctx)) {
dev_err(dev, "%s: Could not initialize context %p\n",
__func__, ctx);
@ -1540,21 +1538,21 @@ static int recover_context(struct cxlflash_cfg *cfg,
goto out;
}

rc = cxl_start_work(ctx, &ctxi->work);
rc = cfg->ops->start_work(ctx, ctxi->irqs);
if (unlikely(rc)) {
dev_dbg(dev, "%s: Could not start context rc=%d\n",
__func__, rc);
goto err1;
}

ctxid = cxl_process_element(ctx);
ctxid = cfg->ops->process_element(ctx);
if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
rc = -EPERM;
goto err2;
}

file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
if (unlikely(fd < 0)) {
rc = -ENODEV;
dev_err(dev, "%s: Could not get file descriptor\n", __func__);
@ -1601,9 +1599,9 @@ err3:
fput(file);
put_unused_fd(fd);
err2:
cxl_stop_context(ctx);
cfg->ops->stop_context(ctx);
err1:
cxl_release_context(ctx);
cfg->ops->release_context(ctx);
goto out;
}
@ -96,15 +96,15 @@ struct ctx_info {
struct llun_info **rht_lun; /* Mapping of RHT entries to LUNs */
u8 *rht_needs_ws; /* User-desired write-same function per RHTE */

struct cxl_ioctl_start_work work;
u64 ctxid;
u64 irqs; /* Number of interrupts requested for context */
pid_t pid;
bool initialized;
bool unavail;
bool err_recovery_active;
struct mutex mutex; /* Context protection */
struct kref kref;
struct cxl_context *ctx;
void *ctx;
struct cxlflash_cfg *cfg;
struct list_head luns; /* LUNs attached to this context */
const struct vm_operations_struct *cxl_mmap_vmops;
@ -40,6 +40,7 @@
#define TPGS_SUPPORT_LBA_DEPENDENT 0x10
#define TPGS_SUPPORT_OFFLINE 0x40
#define TPGS_SUPPORT_TRANSITION 0x80
#define TPGS_SUPPORT_ALL 0xdf

#define RTPG_FMT_MASK 0x70
#define RTPG_FMT_EXT_HDR 0x10
@ -81,6 +82,7 @@ struct alua_port_group {
int tpgs;
int state;
int pref;
int valid_states;
unsigned flags; /* used for optimizing STPG */
unsigned char transition_tmo;
unsigned long expiry;
@ -243,6 +245,7 @@ static struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev,
pg->group_id = group_id;
pg->tpgs = tpgs;
pg->state = SCSI_ACCESS_STATE_OPTIMAL;
pg->valid_states = TPGS_SUPPORT_ALL;
if (optimize_stpg)
pg->flags |= ALUA_OPTIMIZE_STPG;
kref_init(&pg->kref);
@ -516,7 +519,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
{
struct scsi_sense_hdr sense_hdr;
struct alua_port_group *tmp_pg;
int len, k, off, valid_states = 0, bufflen = ALUA_RTPG_SIZE;
int len, k, off, bufflen = ALUA_RTPG_SIZE;
unsigned char *desc, *buff;
unsigned err, retval;
unsigned int tpg_desc_tbl_off;
@ -541,6 +544,22 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);

if (retval) {
/*
* Some (broken) implementations have a habit of returning
* an error during things like firmware update etc.
* But if the target only supports active/optimized there's
* not much we can do; it's not that we can switch paths
* or anything.
* So ignore any errors to avoid spurious failures during
* path failover.
*/
if ((pg->valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0) {
sdev_printk(KERN_INFO, sdev,
"%s: ignoring rtpg result %d\n",
ALUA_DH_NAME, retval);
kfree(buff);
return SCSI_DH_OK;
}
if (!scsi_sense_valid(&sense_hdr)) {
sdev_printk(KERN_INFO, sdev,
"%s: rtpg failed, result %d\n",
@ -652,7 +671,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
rcu_read_unlock();
}
if (tmp_pg == pg)
valid_states = desc[1];
tmp_pg->valid_states = desc[1];
spin_unlock_irqrestore(&tmp_pg->lock, flags);
}
kref_put(&tmp_pg->kref, release_port_group);
@ -665,13 +684,13 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
"%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
pg->pref ? "preferred" : "non-preferred",
valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');

switch (pg->state) {
case SCSI_ACCESS_STATE_TRANSITIONING:
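The alua_rtpg hunks move valid_states from a stack variable into the port group and print one letter per supported state, uppercase when the bit is set. A small sketch of that bit decoding, reusing the TPGS_SUPPORT_* values from the hunk above; the sample bitmap is made up.

#include <stdio.h>

/* Values as defined in the hunk above. */
#define TPGS_SUPPORT_LBA_DEPENDENT 0x10
#define TPGS_SUPPORT_OFFLINE       0x40
#define TPGS_SUPPORT_TRANSITION    0x80

int main(void)
{
	/* desc[1] of an RTPG descriptor carries the supported-states
	 * bitmap; uppercase marks a supported state, as in the
	 * driver's "supports %c%c%c..." message. */
	unsigned char valid_states =
		TPGS_SUPPORT_OFFLINE | TPGS_SUPPORT_TRANSITION;

	printf("supports %c%c%c\n",
	       valid_states & TPGS_SUPPORT_TRANSITION ? 'T' : 't',
	       valid_states & TPGS_SUPPORT_OFFLINE ? 'O' : 'o',
	       valid_states & TPGS_SUPPORT_LBA_DEPENDENT ? 'L' : 'l');
	return 0;
}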
@ -107,24 +107,6 @@ void fnic_debugfs_terminate(void)
vfree(fc_trc_flag);
}

/*
* fnic_trace_ctrl_open - Open the trace_enable file for fnic_trace
* Or Open fc_trace_enable file for fc_trace
* @inode: The inode pointer.
* @file: The file pointer to attach the trace enable/disable flag.
*
* Description:
* This routine opens a debugsfs file trace_enable or fc_trace_enable.
*
* Returns:
* This function returns zero if successful.
*/
static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp)
{
filp->private_data = inode->i_private;
return 0;
}

/*
* fnic_trace_ctrl_read -
* Read trace_enable ,fc_trace_enable
@ -220,7 +202,7 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp,

static const struct file_operations fnic_trace_ctrl_fops = {
.owner = THIS_MODULE,
.open = fnic_trace_ctrl_open,
.open = simple_open,
.read = fnic_trace_ctrl_read,
.write = fnic_trace_ctrl_write,
};
@ -632,7 +614,7 @@ static ssize_t fnic_reset_stats_write(struct file *file,
sizeof(struct io_path_stats) - sizeof(u64));
memset(fw_stats_p+1, 0,
sizeof(struct fw_stats) - sizeof(u64));
getnstimeofday(&stats->stats_timestamps.last_reset_time);
ktime_get_real_ts64(&stats->stats_timestamps.last_reset_time);
}

(*ppos)++;

@ -442,15 +442,13 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
shost_printk(KERN_INFO, fnic->lport->host,
"process_vlan_resp: FIP VLAN %d\n", vid);
vlan = kmalloc(sizeof(*vlan),
GFP_ATOMIC);
vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
if (!vlan) {
/* retry from timer */
spin_unlock_irqrestore(&fnic->vlans_lock,
flags);
goto out;
}
memset(vlan, 0, sizeof(struct fcoe_vlan));
vlan->vid = vid & 0x0fff;
vlan->state = FIP_VLAN_AVAIL;
list_add_tail(&vlan->list, &fnic->vlans);

@ -906,7 +906,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,

FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"icmnd_cmpl abts pending "
"hdr status = %s tag = 0x%x sc = 0x%p"
"hdr status = %s tag = 0x%x sc = 0x%p "
"scsi_status = %x residual = %d\n",
fnic_fcpio_status_to_str(hdr_status),
id, sc,

@ -18,8 +18,8 @@
#define _FNIC_STATS_H_

struct stats_timestamps {
struct timespec last_reset_time;
struct timespec last_read_time;
struct timespec64 last_reset_time;
struct timespec64 last_read_time;
};

struct io_path_stats {

@ -111,7 +111,7 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
int len = 0;
unsigned long flags;
char str[KSYM_SYMBOL_LEN];
struct timespec val;
struct timespec64 val;
fnic_trace_data_t *tbp;

spin_lock_irqsave(&fnic_trace_lock, flags);
@ -129,10 +129,10 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
/* Convert function pointer to function name */
if (sizeof(unsigned long) < 8) {
sprint_symbol(str, tbp->fnaddr.low);
jiffies_to_timespec(tbp->timestamp.low, &val);
jiffies_to_timespec64(tbp->timestamp.low, &val);
} else {
sprint_symbol(str, tbp->fnaddr.val);
jiffies_to_timespec(tbp->timestamp.val, &val);
jiffies_to_timespec64(tbp->timestamp.val, &val);
}
/*
* Dump trace buffer entry to memory file
@ -140,8 +140,8 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
*/
len += snprintf(fnic_dbgfs_prt->buffer + len,
(trace_max_pages * PAGE_SIZE * 3) - len,
"%16lu.%16lu %-50s %8x %8x %16llx %16llx "
"%16llx %16llx %16llx\n", val.tv_sec,
"%16llu.%09lu %-50s %8x %8x %16llx %16llx "
"%16llx %16llx %16llx\n", (u64)val.tv_sec,
val.tv_nsec, str, tbp->host_no, tbp->tag,
tbp->data[0], tbp->data[1], tbp->data[2],
tbp->data[3], tbp->data[4]);
@ -171,10 +171,10 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
/* Convert function pointer to function name */
if (sizeof(unsigned long) < 8) {
sprint_symbol(str, tbp->fnaddr.low);
jiffies_to_timespec(tbp->timestamp.low, &val);
jiffies_to_timespec64(tbp->timestamp.low, &val);
} else {
sprint_symbol(str, tbp->fnaddr.val);
jiffies_to_timespec(tbp->timestamp.val, &val);
jiffies_to_timespec64(tbp->timestamp.val, &val);
}
/*
* Dump trace buffer entry to memory file
@ -182,8 +182,8 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
*/
len += snprintf(fnic_dbgfs_prt->buffer + len,
(trace_max_pages * PAGE_SIZE * 3) - len,
"%16lu.%16lu %-50s %8x %8x %16llx %16llx "
"%16llx %16llx %16llx\n", val.tv_sec,
"%16llu.%09lu %-50s %8x %8x %16llx %16llx "
"%16llx %16llx %16llx\n", (u64)val.tv_sec,
val.tv_nsec, str, tbp->host_no, tbp->tag,
tbp->data[0], tbp->data[1], tbp->data[2],
tbp->data[3], tbp->data[4]);
@ -217,29 +217,29 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
{
int len = 0;
int buf_size = debug->buf_size;
struct timespec val1, val2;
struct timespec64 val1, val2;

getnstimeofday(&val1);
ktime_get_real_ts64(&val1);
len = snprintf(debug->debug_buffer + len, buf_size - len,
"------------------------------------------\n"
"\t\tTime\n"
"------------------------------------------\n");

len += snprintf(debug->debug_buffer + len, buf_size - len,
"Current time : [%ld:%ld]\n"
"Last stats reset time: [%ld:%ld]\n"
"Last stats read time: [%ld:%ld]\n"
"delta since last reset: [%ld:%ld]\n"
"delta since last read: [%ld:%ld]\n",
val1.tv_sec, val1.tv_nsec,
stats->stats_timestamps.last_reset_time.tv_sec,
"Current time : [%lld:%ld]\n"
"Last stats reset time: [%lld:%09ld]\n"
"Last stats read time: [%lld:%ld]\n"
"delta since last reset: [%lld:%ld]\n"
"delta since last read: [%lld:%ld]\n",
(s64)val1.tv_sec, val1.tv_nsec,
(s64)stats->stats_timestamps.last_reset_time.tv_sec,
stats->stats_timestamps.last_reset_time.tv_nsec,
stats->stats_timestamps.last_read_time.tv_sec,
(s64)stats->stats_timestamps.last_read_time.tv_sec,
stats->stats_timestamps.last_read_time.tv_nsec,
timespec_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec,
timespec_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec,
timespec_sub(val1, stats->stats_timestamps.last_read_time).tv_sec,
timespec_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec);
(s64)timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec,
timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec,
(s64)timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_sec,
timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec);

stats->stats_timestamps.last_read_time = val1;

@ -403,12 +403,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
"\t\tOther Important Statistics\n"
"------------------------------------------\n");

jiffies_to_timespec(stats->misc_stats.last_isr_time, &val1);
jiffies_to_timespec(stats->misc_stats.last_ack_time, &val2);
jiffies_to_timespec64(stats->misc_stats.last_isr_time, &val1);
jiffies_to_timespec64(stats->misc_stats.last_ack_time, &val2);

len += snprintf(debug->debug_buffer + len, buf_size - len,
"Last ISR time: %llu (%8lu.%8lu)\n"
"Last ACK time: %llu (%8lu.%8lu)\n"
"Last ISR time: %llu (%8llu.%09lu)\n"
"Last ACK time: %llu (%8llu.%09lu)\n"
"Number of ISRs: %lld\n"
"Maximum CQ Entries: %lld\n"
"Number of ACK index out of range: %lld\n"
@ -425,9 +425,9 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
"Number of rport not ready: %lld\n"
"Number of receive frame errors: %lld\n",
(u64)stats->misc_stats.last_isr_time,
val1.tv_sec, val1.tv_nsec,
(s64)val1.tv_sec, val1.tv_nsec,
(u64)stats->misc_stats.last_ack_time,
val2.tv_sec, val2.tv_nsec,
(s64)val2.tv_sec, val2.tv_nsec,
(u64)atomic64_read(&stats->misc_stats.isr_count),
(u64)atomic64_read(&stats->misc_stats.max_cq_entries),
(u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range),
|
||||
const struct hisi_sas_hw_error *sub;
|
||||
};
|
||||
|
||||
struct hisi_sas_rst {
|
||||
struct hisi_hba *hisi_hba;
|
||||
struct completion *completion;
|
||||
struct work_struct work;
|
||||
bool done;
|
||||
};
|
||||
|
||||
#define HISI_SAS_RST_WORK_INIT(r, c) \
|
||||
{ .hisi_hba = hisi_hba, \
|
||||
.completion = &c, \
|
||||
.work = __WORK_INITIALIZER(r.work, \
|
||||
hisi_sas_sync_rst_work_handler), \
|
||||
.done = false, \
|
||||
}
|
||||
|
||||
#define HISI_SAS_DECLARE_RST_WORK_ON_STACK(r) \
|
||||
DECLARE_COMPLETION_ONSTACK(c); \
|
||||
DECLARE_WORK(w, hisi_sas_sync_rst_work_handler); \
|
||||
struct hisi_sas_rst r = HISI_SAS_RST_WORK_INIT(r, c)
|
||||
|
||||
enum hisi_sas_bit_err_type {
|
||||
HISI_SAS_ERR_SINGLE_BIT_ECC = 0x0,
|
||||
HISI_SAS_ERR_MULTI_BIT_ECC = 0x1,
|
||||
};
|
||||
|
||||
enum hisi_sas_phy_event {
|
||||
HISI_PHYE_PHY_UP = 0U,
|
||||
HISI_PHYE_LINK_RESET,
|
||||
HISI_PHYES_NUM,
|
||||
};
|
||||
|
||||
struct hisi_sas_phy {
|
||||
struct work_struct works[HISI_PHYES_NUM];
|
||||
struct hisi_hba *hisi_hba;
|
||||
struct hisi_sas_port *port;
|
||||
struct asd_sas_phy sas_phy;
|
||||
struct sas_identify identify;
|
||||
struct work_struct phyup_ws;
|
||||
u64 port_id; /* from hw */
|
||||
u64 dev_sas_addr;
|
||||
u64 frame_rcvd_size;
|
||||
@ -205,13 +236,16 @@ struct hisi_sas_hw {
|
||||
void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no,
|
||||
struct sas_phy_linkrates *linkrates);
|
||||
enum sas_linkrate (*phy_get_max_linkrate)(void);
|
||||
void (*free_device)(struct hisi_hba *hisi_hba,
|
||||
void (*clear_itct)(struct hisi_hba *hisi_hba,
|
||||
struct hisi_sas_device *dev);
|
||||
void (*free_device)(struct hisi_sas_device *sas_dev);
|
||||
int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
|
||||
void (*dereg_device)(struct hisi_hba *hisi_hba,
|
||||
struct domain_device *device);
|
||||
int (*soft_reset)(struct hisi_hba *hisi_hba);
|
||||
u32 (*get_phys_state)(struct hisi_hba *hisi_hba);
|
||||
int (*write_gpio)(struct hisi_hba *hisi_hba, u8 reg_type,
|
||||
u8 reg_index, u8 reg_count, u8 *write_data);
|
||||
int max_command_entries;
|
||||
int complete_hdr_size;
|
||||
};
|
||||
@ -225,6 +259,7 @@ struct hisi_hba {
|
||||
struct device *dev;
|
||||
|
||||
void __iomem *regs;
|
||||
void __iomem *sgpio_regs;
|
||||
struct regmap *ctrl;
|
||||
u32 ctrl_reset_reg;
|
||||
u32 ctrl_reset_sts_reg;
|
||||
@ -409,7 +444,8 @@ extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
|
||||
extern void hisi_sas_init_add(struct hisi_hba *hisi_hba);
|
||||
extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost);
|
||||
extern void hisi_sas_free(struct hisi_hba *hisi_hba);
|
||||
extern u8 hisi_sas_get_ata_protocol(u8 cmd, int direction);
|
||||
extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis,
|
||||
int direction);
|
||||
extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port);
|
||||
extern void hisi_sas_sata_done(struct sas_task *task,
|
||||
struct hisi_sas_slot *slot);
|
||||
@ -425,5 +461,9 @@ extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
|
||||
struct hisi_sas_slot *slot);
|
||||
extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba);
|
||||
extern void hisi_sas_rst_work_handler(struct work_struct *work);
|
||||
extern void hisi_sas_sync_rst_work_handler(struct work_struct *work);
|
||||
extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba);
|
||||
extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
|
||||
enum hisi_sas_phy_event event);
|
||||
extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba);
|
||||
#endif
|
||||
|
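The HISI_SAS_DECLARE_RST_WORK_ON_STACK machinery added above bundles a work item with an on-stack completion, so a caller can queue the reset and block until the handler signals it, then read a done flag for the result. A standalone sketch of the same handoff using POSIX threads in place of the kernel workqueue; every name here is illustrative.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct hisi_sas_rst: the work payload plus a place
 * for the worker to signal completion and report success. */
struct sync_rst {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool completed;
	bool done; /* true if the reset itself succeeded */
};

static void *rst_work_handler(void *arg)
{
	struct sync_rst *rst = arg;

	/* ... perform the reset; pretend it succeeded ... */
	pthread_mutex_lock(&rst->lock);
	rst->done = true;
	rst->completed = true;
	pthread_cond_signal(&rst->cond); /* complete(rst->completion) */
	pthread_mutex_unlock(&rst->lock);
	return NULL;
}

int main(void)
{
	struct sync_rst r = { PTHREAD_MUTEX_INITIALIZER,
			      PTHREAD_COND_INITIALIZER, false, false };
	pthread_t worker;

	pthread_create(&worker, NULL, rst_work_handler, &r); /* queue_work() */

	pthread_mutex_lock(&r.lock); /* wait_for_completion() */
	while (!r.completed)
		pthread_cond_wait(&r.cond, &r.lock);
	pthread_mutex_unlock(&r.lock);

	pthread_join(worker, NULL);
	printf(r.done ? "reset complete\n" : "reset failed\n");
	return 0;
}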
@ -22,10 +22,12 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct domain_device *device,
int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata);

u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
switch (cmd) {
switch (fis->command) {
case ATA_CMD_FPDMA_WRITE:
case ATA_CMD_FPDMA_READ:
case ATA_CMD_FPDMA_RECV:
@ -77,10 +79,26 @@ u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
case ATA_CMD_ZAC_MGMT_OUT:
return HISI_SAS_SATA_PROTOCOL_NONDATA;
default:
{
if (fis->command == ATA_CMD_SET_MAX) {
switch (fis->features) {
case ATA_SET_MAX_PASSWD:
case ATA_SET_MAX_LOCK:
return HISI_SAS_SATA_PROTOCOL_PIO;

case ATA_SET_MAX_PASSWD_DMA:
case ATA_SET_MAX_UNLOCK_DMA:
return HISI_SAS_SATA_PROTOCOL_DMA;

default:
return HISI_SAS_SATA_PROTOCOL_NONDATA;
}
}
if (direction == DMA_NONE)
return HISI_SAS_SATA_PROTOCOL_NONDATA;
return HISI_SAS_SATA_PROTOCOL_PIO;
}
}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);

@ -192,7 +210,8 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,

if (!sas_protocol_ata(task->task_proto))
if (slot->n_elem)
dma_unmap_sg(dev, task->scatter, slot->n_elem,
dma_unmap_sg(dev, task->scatter,
task->num_scatter,
task->data_dir);

if (sas_dev)
@ -431,7 +450,8 @@ err_out:
dev_err(dev, "task prep: failed[%d]!\n", rc);
if (!sas_protocol_ata(task->task_proto))
if (n_elem)
dma_unmap_sg(dev, task->scatter, n_elem,
dma_unmap_sg(dev, task->scatter,
task->num_scatter,
task->data_dir);
prep_out:
return rc;
@ -578,6 +598,9 @@ static int hisi_sas_dev_found(struct domain_device *device)
}
}

dev_info(dev, "dev[%d:%x] found\n",
sas_dev->device_id, sas_dev->dev_type);

return 0;
}

@ -617,7 +640,7 @@ static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
static void hisi_sas_phyup_work(struct work_struct *work)
{
struct hisi_sas_phy *phy =
container_of(work, struct hisi_sas_phy, phyup_ws);
container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
int phy_no = sas_phy->id;
@ -626,10 +649,37 @@ static void hisi_sas_phyup_work(struct work_struct *work)
hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}

static void hisi_sas_linkreset_work(struct work_struct *work)
{
struct hisi_sas_phy *phy =
container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
struct asd_sas_phy *sas_phy = &phy->sas_phy;

hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}

static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
};

bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
enum hisi_sas_phy_event event)
{
struct hisi_hba *hisi_hba = phy->hisi_hba;

if (WARN_ON(event >= HISI_PHYES_NUM))
return false;

return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
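hisi_sas_notify_phy_event() above validates the event index and queues the matching entry of a per-phy work array, with hisi_sas_phye_fns mapping each enum value to its handler. A compact sketch of that table-driven dispatch, with plain function pointers standing in for work_structs and the workqueue; names are illustrative.

#include <stdio.h>

enum phy_event { PHYE_PHY_UP, PHYE_LINK_RESET, PHYES_NUM };

static void phyup_work(void)     { puts("phy up");     }
static void linkreset_work(void) { puts("link reset"); }

/* Mirrors hisi_sas_phye_fns: index by event, get the handler. */
static void (*const phye_fns[PHYES_NUM])(void) = {
	[PHYE_PHY_UP]     = phyup_work,
	[PHYE_LINK_RESET] = linkreset_work,
};

static int notify_phy_event(enum phy_event event)
{
	if (event >= PHYES_NUM) /* WARN_ON() in the driver */
		return 0;
	phye_fns[event]();      /* queue_work() in the driver */
	return 1;
}

int main(void)
{
	return notify_phy_event(PHYE_LINK_RESET) ? 0 : 1;
}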
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
int i;

phy->hisi_hba = hisi_hba;
phy->port = NULL;
@ -647,7 +697,8 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
sas_phy->lldd_phy = phy;

INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work);
for (i = 0; i < HISI_PHYES_NUM; i++)
INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
}

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
@ -702,7 +753,7 @@ static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}

static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
struct hisi_sas_device *sas_dev;
struct domain_device *device;
@ -719,6 +770,7 @@ static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
hisi_sas_release_task(hisi_hba, device);
}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
struct domain_device *device)
@ -733,17 +785,21 @@ static void hisi_sas_dev_gone(struct domain_device *device)
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;

dev_info(dev, "found dev[%d:%x] is gone\n",
dev_info(dev, "dev[%d:%x] is gone\n",
sas_dev->device_id, sas_dev->dev_type);

hisi_sas_internal_task_abort(hisi_hba, device,
if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);

hisi_sas_dereg_device(hisi_hba, device);
hisi_sas_dereg_device(hisi_hba, device);

hisi_hba->hw->free_device(hisi_hba, sas_dev);
device->lldd_dev = NULL;
memset(sas_dev, 0, sizeof(*sas_dev));
hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
device->lldd_dev = NULL;
}

if (hisi_hba->hw->free_device)
hisi_hba->hw->free_device(sas_dev);
sas_dev->dev_type = SAS_PHY_UNUSED;
}

@ -859,12 +915,13 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
struct hisi_sas_slot *slot = task->lldd_task;

dev_err(dev, "abort tmf: TMF task timeout\n");
dev_err(dev, "abort tmf: TMF task timeout and not done\n");
if (slot)
slot->task = NULL;

goto ex_err;
}
} else
dev_err(dev, "abort tmf: TMF task timeout\n");
}

if (task->task_status.resp == SAS_TASK_COMPLETE &&
@ -985,27 +1042,42 @@ static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
sizeof(ssp_task), tmf);
}

static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba,
struct asd_sas_port *sas_port, enum sas_linkrate linkrate)
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
struct hisi_sas_device *sas_dev;
struct domain_device *device;
u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
int i;

for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
sas_dev = &hisi_hba->devices[i];
device = sas_dev->sas_device;
struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
struct domain_device *device = sas_dev->sas_device;
struct asd_sas_port *sas_port;
struct hisi_sas_port *port;
struct hisi_sas_phy *phy = NULL;
struct asd_sas_phy *sas_phy;

if ((sas_dev->dev_type == SAS_PHY_UNUSED)
|| !device || (device->port != sas_port))
|| !device || !device->port)
continue;

hisi_hba->hw->free_device(hisi_hba, sas_dev);
sas_port = device->port;
port = to_hisi_sas_port(sas_port);

/* Update linkrate of directly attached device. */
if (!device->parent)
device->linkrate = linkrate;
list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
if (state & BIT(sas_phy->id)) {
phy = sas_phy->lldd_phy;
break;
}

hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
if (phy) {
port->id = phy->port_id;

/* Update linkrate of directly attached device. */
if (!device->parent)
device->linkrate = phy->sas_phy.linkrate;

hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
} else
port->id = 0xff;
}
}

@ -1020,21 +1092,17 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct asd_sas_port *sas_port = sas_phy->port;
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
bool do_port_check = !!(_sas_port != sas_port);

if (!sas_phy->phy->enabled)
continue;

/* Report PHY state change to libsas */
if (state & (1 << phy_no)) {
if (do_port_check && sas_port) {
if (state & BIT(phy_no)) {
if (do_port_check && sas_port && sas_port->port_dev) {
struct domain_device *dev = sas_port->port_dev;

_sas_port = sas_port;
port->id = phy->port_id;
hisi_sas_refresh_port_id(hisi_hba,
sas_port, sas_phy->linkrate);

if (DEV_IS_EXPANDER(dev->dev_type))
sas_ha->notify_port_event(sas_phy,
@ -1045,8 +1113,6 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
hisi_sas_phy_down(hisi_hba, phy_no, 0);

}

drain_workqueue(hisi_hba->shost->work_q);
}

static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
@ -1063,7 +1129,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
return -1;

dev_dbg(dev, "controller resetting...\n");
dev_info(dev, "controller resetting...\n");
old_state = hisi_hba->hw->get_phys_state(hisi_hba);

scsi_block_requests(shost);
@ -1072,6 +1138,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
if (rc) {
dev_warn(dev, "controller reset failed (%d)\n", rc);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
scsi_unblock_requests(shost);
goto out;
}
spin_lock_irqsave(&hisi_hba->lock, flags);
@ -1083,15 +1150,14 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
/* Init and wait for PHYs to come up and all libsas event finished. */
hisi_hba->hw->phys_init(hisi_hba);
msleep(1000);
drain_workqueue(hisi_hba->wq);
drain_workqueue(shost->work_q);
hisi_sas_refresh_port_id(hisi_hba);
scsi_unblock_requests(shost);

state = hisi_hba->hw->get_phys_state(hisi_hba);
hisi_sas_rescan_topology(hisi_hba, old_state, state);
dev_dbg(dev, "controller reset complete\n");
dev_info(dev, "controller reset complete\n");

out:
scsi_unblock_requests(shost);
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

return rc;
@ -1134,6 +1200,11 @@ static int hisi_sas_abort_task(struct sas_task *task)

rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_CMD, tag);
if (rc2 < 0) {
dev_err(dev, "abort task: internal abort (%d)\n", rc2);
return TMF_RESP_FUNC_FAILED;
}

/*
* If the TMF finds that the IO is not in the device and also
* the internal abort does not succeed, then it is safe to
@ -1151,8 +1222,12 @@ static int hisi_sas_abort_task(struct sas_task *task)
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
if (task->dev->dev_type == SAS_SATA_DEV) {
hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
if (rc < 0) {
dev_err(dev, "abort task: internal abort failed\n");
goto out;
}
hisi_sas_dereg_device(hisi_hba, device);
rc = hisi_sas_softreset_ata_disk(device);
}
@ -1163,7 +1238,8 @@ static int hisi_sas_abort_task(struct sas_task *task)

rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_CMD, tag);
if (rc == TMF_RESP_FUNC_FAILED && task->lldd_task) {
if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
task->lldd_task) {
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_do_release_task(hisi_hba, task, slot);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
@ -1178,12 +1254,29 @@ out:

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
struct hisi_sas_tmf_task tmf_task;
int rc = TMF_RESP_FUNC_FAILED;
unsigned long flags;

rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
if (rc < 0) {
dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
return TMF_RESP_FUNC_FAILED;
}
hisi_sas_dereg_device(hisi_hba, device);

tmf_task.tmf = TMF_ABORT_TASK_SET;
rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

if (rc == TMF_RESP_FUNC_COMPLETE) {
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_release_task(hisi_hba, device);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

return rc;
}

@ -1213,20 +1306,25 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
unsigned long flags;
struct device *dev = hisi_hba->dev;
int rc = TMF_RESP_FUNC_FAILED;
unsigned long flags;

if (sas_dev->dev_status != HISI_SAS_DEV_EH)
return TMF_RESP_FUNC_FAILED;
sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

hisi_sas_internal_task_abort(hisi_hba, device,
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
if (rc < 0) {
dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
return TMF_RESP_FUNC_FAILED;
}
hisi_sas_dereg_device(hisi_hba, device);

rc = hisi_sas_debug_I_T_nexus_reset(device);

if (rc == TMF_RESP_FUNC_COMPLETE) {
if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) {
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_release_task(hisi_hba, device);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
@ -1249,8 +1347,10 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
/* Clear internal IO and then hardreset */
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
if (rc == TMF_RESP_FUNC_FAILED)
if (rc < 0) {
dev_err(dev, "lu_reset: internal abort failed\n");
goto out;
}
hisi_sas_dereg_device(hisi_hba, device);

phy = sas_get_local_phy(device);
@ -1266,6 +1366,14 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
} else {
struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
if (rc < 0) {
dev_err(dev, "lu_reset: internal abort failed\n");
goto out;
}
hisi_sas_dereg_device(hisi_hba, device);

rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
if (rc == TMF_RESP_FUNC_COMPLETE) {
spin_lock_irqsave(&hisi_hba->lock, flags);
@ -1283,8 +1391,14 @@ out:
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);

return hisi_sas_controller_reset(hisi_hba);
queue_work(hisi_hba->wq, &r.work);
wait_for_completion(r.completion);
if (r.done)
return TMF_RESP_FUNC_COMPLETE;

return TMF_RESP_FUNC_FAILED;
}

static int hisi_sas_query_task(struct sas_task *task)
@ -1441,8 +1555,14 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct device *dev = hisi_hba->dev;
int res;

/*
* The interface is not realized means this HW don't support internal
* abort, or don't need to do internal abort. Then here, we return
* TMF_RESP_FUNC_FAILED and let other steps go on, which depends that
* the internal abort has been executed and returned CQ.
*/
if (!hisi_hba->hw->prep_abort)
return -EOPNOTSUPP;
return TMF_RESP_FUNC_FAILED;

task = sas_alloc_slow_task(GFP_KERNEL);
if (!task)
@ -1473,9 +1593,11 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,

if (slot)
slot->task = NULL;
dev_err(dev, "internal task abort: timeout.\n");
dev_err(dev, "internal task abort: timeout and not done.\n");
res = -EIO;
goto exit;
}
} else
dev_err(dev, "internal task abort: timeout.\n");
}

if (task->task_status.resp == SAS_TASK_COMPLETE &&
@ -1507,6 +1629,22 @@ static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
hisi_sas_port_notify_formed(sas_phy);
}

static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
{
}

static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
u8 reg_index, u8 reg_count, u8 *write_data)
{
struct hisi_hba *hisi_hba = sha->lldd_ha;

if (!hisi_hba->hw->write_gpio)
return -EOPNOTSUPP;

return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
reg_index, reg_count, write_data);
}

static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
phy->phy_attached = 0;
@ -1561,6 +1699,11 @@ EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

static struct device_attribute *host_attrs[] = {
&dev_attr_phy_event_threshold,
NULL,
};

static struct scsi_host_template _hisi_sas_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
@ -1580,6 +1723,7 @@ static struct scsi_host_template _hisi_sas_sht = {
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
.shost_attrs = host_attrs,
};
struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
EXPORT_SYMBOL_GPL(hisi_sas_sht);
@ -1597,6 +1741,8 @@ static struct sas_domain_function_template hisi_sas_transport_ops = {
.lldd_query_task = hisi_sas_query_task,
.lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
.lldd_port_formed = hisi_sas_port_formed,
.lldd_port_deformed = hisi_sas_port_deformed,
.lldd_write_gpio = hisi_sas_write_gpio,
};

void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
@ -1657,6 +1803,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
cq->hisi_hba = hisi_hba;

/* Delivery queue structure */
spin_lock_init(&dq->lock);
dq->id = i;
dq->hisi_hba = hisi_hba;

@ -1803,6 +1950,17 @@ void hisi_sas_rst_work_handler(struct work_struct *work)
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);

void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
struct hisi_sas_rst *rst =
container_of(work, struct hisi_sas_rst, work);

if (!hisi_sas_controller_reset(rst->hisi_hba))
rst->done = true;
complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);

int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
@ -1909,6 +2067,13 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
if (IS_ERR(hisi_hba->regs))
goto err_out;

res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(hisi_hba->sgpio_regs))
goto err_out;
}

if (hisi_sas_alloc(hisi_hba, shost)) {
hisi_sas_free(hisi_hba);
goto err_out;
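Several hunks above make callers of hisi_sas_internal_task_abort() distinguish a negative errno (the abort could not even be issued) from TMF_RESP_FUNC_FAILED (it ran but failed). A sketch of that two-level return convention; the constant values and the helper are illustrative, not the driver's real definitions.

#include <errno.h>
#include <stdio.h>

#define TMF_RESP_FUNC_COMPLETE 0
#define TMF_RESP_FUNC_FAILED   1 /* illustrative values */

/* Stand-in for hisi_sas_internal_task_abort(): <0 means the request
 * could not be issued at all; >=0 is a TMF-style response code. */
static int internal_task_abort(int make_it_fail)
{
	if (make_it_fail)
		return -EIO;
	return TMF_RESP_FUNC_COMPLETE;
}

int main(void)
{
	int rc = internal_task_abort(1);

	if (rc < 0) {
		/* Hard failure: bail out instead of pressing on, as the
		 * reworked abort paths above now do. */
		fprintf(stderr, "internal abort rc=%d\n", rc);
		return 1;
	}
	if (rc == TMF_RESP_FUNC_FAILED)
		puts("abort ran but failed; release tasks");
	return 0;
}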
@ -544,7 +544,7 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
(0xff00ULL << ITCT_HDR_REJ_OPEN_TL_OFF));
}

static void free_device_v1_hw(struct hisi_hba *hisi_hba,
static void clear_itct_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
u64 dev_id = sas_dev->device_id;
@ -1482,7 +1482,7 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
else if (phy->identify.device_type != SAS_PHY_UNUSED)
phy->identify.target_port_protocols =
SAS_PROTOCOL_SMP;
queue_work(hisi_hba->wq, &phy->phyup_ws);
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);

end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
@ -1850,7 +1850,7 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
.hw_init = hisi_sas_v1_init,
.setup_itct = setup_itct_v1_hw,
.sl_notify = sl_notify_v1_hw,
.free_device = free_device_v1_hw,
.clear_itct = clear_itct_v1_hw,
.prep_smp = prep_smp_v1_hw,
.prep_ssp = prep_ssp_v1_hw,
.get_free_slot = get_free_slot_v1_hw,
|
||||
#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
|
||||
#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17
|
||||
#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
|
||||
#define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19
|
||||
#define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20
|
||||
#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
|
||||
#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
|
||||
#define CHL_INT2 (PORT_BASE + 0x1bc)
|
||||
#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
|
||||
#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
|
||||
#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
|
||||
#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
|
||||
@ -952,7 +957,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
|
||||
(0x1ULL << ITCT_HDR_RTOLT_OFF));
|
||||
}
|
||||
|
||||
static void free_device_v2_hw(struct hisi_hba *hisi_hba,
|
||||
static void clear_itct_v2_hw(struct hisi_hba *hisi_hba,
|
||||
struct hisi_sas_device *sas_dev)
|
||||
{
|
||||
DECLARE_COMPLETION_ONSTACK(completion);
|
||||
@ -963,10 +968,6 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
|
||||
|
||||
sas_dev->completion = &completion;
|
||||
|
||||
/* SoC bug workaround */
|
||||
if (dev_is_sata(sas_dev->sas_device))
|
||||
clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap);
|
||||
|
||||
/* clear the itct interrupt state */
|
||||
if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
|
||||
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
|
||||
@ -981,6 +982,15 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
|
||||
}
|
||||
}
|
||||
|
||||
static void free_device_v2_hw(struct hisi_sas_device *sas_dev)
|
||||
{
|
||||
struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
|
||||
|
||||
/* SoC bug workaround */
|
||||
if (dev_is_sata(sas_dev->sas_device))
|
||||
clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap);
|
||||
}
|
||||
|
||||
static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
|
||||
{
|
||||
int i, reset_val;
|
||||
@ -1177,8 +1187,8 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
|
||||
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
|
||||
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff);
|
||||
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
|
||||
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
|
||||
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
|
||||
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff857fff);
|
||||
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbfe);
|
||||
hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x13f801fc);
|
||||
hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
|
||||
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
|
||||
@ -2356,6 +2366,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
|
||||
ts->resp = SAS_TASK_COMPLETE;
|
||||
|
||||
if (unlikely(aborted)) {
|
||||
dev_dbg(dev, "slot_complete: task(%p) aborted\n", task);
|
||||
ts->stat = SAS_ABORTED_TASK;
|
||||
spin_lock_irqsave(&hisi_hba->lock, flags);
|
||||
hisi_sas_slot_task_free(hisi_hba, task, slot);
|
||||
@ -2400,6 +2411,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
|
||||
(!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
|
||||
u32 err_phase = (complete_hdr->dw0 & CMPLT_HDR_ERR_PHASE_MSK)
|
||||
>> CMPLT_HDR_ERR_PHASE_OFF;
|
||||
u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
|
||||
|
||||
/* Analyse error happens on which phase TX or RX */
|
||||
if (ERR_ON_TX_PHASE(err_phase))
|
||||
@ -2407,6 +2419,16 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
|
||||
else if (ERR_ON_RX_PHASE(err_phase))
|
||||
slot_err_v2_hw(hisi_hba, task, slot, 2);
|
||||
|
||||
if (ts->stat != SAS_DATA_UNDERRUN)
|
||||
dev_info(dev, "erroneous completion iptt=%d task=%p "
|
||||
"CQ hdr: 0x%x 0x%x 0x%x 0x%x "
|
||||
"Error info: 0x%x 0x%x 0x%x 0x%x\n",
|
||||
slot->idx, task,
|
||||
complete_hdr->dw0, complete_hdr->dw1,
|
||||
complete_hdr->act, complete_hdr->dw3,
|
||||
error_info[0], error_info[1],
|
||||
error_info[2], error_info[3]);
|
||||
|
||||
if (unlikely(slot->abort))
|
||||
return ts->stat;
|
||||
goto out;
|
||||
@ -2456,7 +2478,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
|
||||
}
|
||||
|
||||
if (!slot->port->port_attached) {
|
||||
dev_err(dev, "slot complete: port %d has removed\n",
|
||||
dev_warn(dev, "slot complete: port %d has removed\n",
|
||||
slot->port->sas_port.id);
|
||||
ts->stat = SAS_PHY_DOWN;
|
||||
}
|
||||
@ -2517,7 +2539,7 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
|
||||
dw1 |= 1 << CMD_HDR_RESET_OFF;
|
||||
|
||||
dw1 |= (hisi_sas_get_ata_protocol(
|
||||
task->ata_task.fis.command, task->data_dir))
|
||||
&task->ata_task.fis, task->data_dir))
|
||||
<< CMD_HDR_FRAME_TYPE_OFF;
|
||||
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
|
||||
hdr->dw1 = cpu_to_le32(dw1);
|
||||
@ -2687,7 +2709,7 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
|
||||
if (!timer_pending(&hisi_hba->timer))
|
||||
set_link_timer_quirk(hisi_hba);
|
||||
}
|
||||
queue_work(hisi_hba->wq, &phy->phyup_ws);
|
||||
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
|
||||
|
||||
end:
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
|
||||
@ -2713,10 +2735,12 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
|
||||
u32 phy_state, sl_ctrl, txid_auto;
|
||||
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
|
||||
struct hisi_sas_port *port = phy->port;
|
||||
struct device *dev = hisi_hba->dev;
|
||||
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
|
||||
|
||||
phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
|
||||
dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
|
||||
hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);
|
||||
|
||||
sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
|
||||
@ -2813,6 +2837,33 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
|
||||
}
|
||||
|
||||
static const struct hisi_sas_hw_error port_ecc_axi_error[] = {
|
||||
{
|
||||
.irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_ERR_OFF),
|
||||
.msg = "dmac_tx_ecc_bad_err",
|
||||
},
|
||||
{
|
||||
.irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_ERR_OFF),
|
||||
.msg = "dmac_rx_ecc_bad_err",
|
||||
},
|
||||
{
|
||||
.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
|
||||
.msg = "dma_tx_axi_wr_err",
|
||||
},
|
||||
{
|
||||
.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF),
|
||||
.msg = "dma_tx_axi_rd_err",
|
||||
},
|
||||
{
|
||||
.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF),
|
||||
.msg = "dma_rx_axi_wr_err",
|
||||
},
|
||||
{
|
||||
.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
|
||||
.msg = "dma_rx_axi_rd_err",
|
||||
},
|
||||
};
|
||||
|
||||
static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
|
||||
{
|
||||
struct hisi_hba *hisi_hba = p;
|
||||
@ -2829,40 +2880,55 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
|
||||
HGC_INVLD_DQE_INFO_FB_CH3_OFF) & 0x1ff;
|
||||
|
||||
while (irq_msk) {
|
||||
if (irq_msk & (1 << phy_no)) {
|
||||
u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
|
||||
CHL_INT0);
|
||||
u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
|
||||
CHL_INT1);
|
||||
u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
|
||||
CHL_INT2);
|
||||
u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
|
||||
CHL_INT0);
|
||||
u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
|
||||
CHL_INT1);
|
||||
u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
|
||||
CHL_INT2);
|
||||
|
||||
if (irq_value1) {
|
||||
if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
|
||||
CHL_INT1_DMAC_TX_ECC_ERR_MSK))
|
||||
panic("%s: DMAC RX/TX ecc bad error!\
|
||||
(0x%x)",
|
||||
dev_name(dev), irq_value1);
|
||||
if ((irq_msk & (1 << phy_no)) && irq_value1) {
|
||||
int i;
|
||||
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no,
|
||||
CHL_INT1, irq_value1);
|
||||
for (i = 0; i < ARRAY_SIZE(port_ecc_axi_error); i++) {
|
||||
const struct hisi_sas_hw_error *error =
|
||||
&port_ecc_axi_error[i];
|
||||
|
||||
if (!(irq_value1 & error->irq_msk))
|
||||
continue;
|
||||
|
||||
dev_warn(dev, "%s error (phy%d 0x%x) found!\n",
|
||||
error->msg, phy_no, irq_value1);
|
||||
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
|
||||
}
|
||||
|
||||
if (irq_value2)
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no,
|
||||
CHL_INT2, irq_value2);
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no,
|
||||
CHL_INT1, irq_value1);
|
||||
}
|
||||
|
||||
if ((irq_msk & (1 << phy_no)) && irq_value2) {
|
||||
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
|
||||
|
||||
if (irq_value0) {
|
||||
if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK)
|
||||
phy_bcast_v2_hw(phy_no, hisi_hba);
|
||||
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no,
|
||||
CHL_INT0, irq_value0
|
||||
& (~CHL_INT0_HOTPLUG_TOUT_MSK)
|
||||
& (~CHL_INT0_SL_PHY_ENABLE_MSK)
|
||||
& (~CHL_INT0_NOT_RDY_MSK));
|
||||
if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
|
||||
dev_warn(dev, "phy%d identify timeout\n",
|
||||
phy_no);
|
||||
hisi_sas_notify_phy_event(phy,
|
||||
HISI_PHYE_LINK_RESET);
|
||||
}
|
||||
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no,
|
||||
CHL_INT2, irq_value2);
|
||||
}
|
||||
|
||||
if ((irq_msk & (1 << phy_no)) && irq_value0) {
|
||||
if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK)
|
||||
phy_bcast_v2_hw(phy_no, hisi_hba);
|
||||
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no,
|
||||
CHL_INT0, irq_value0
|
||||
& (~CHL_INT0_HOTPLUG_TOUT_MSK)
|
||||
& (~CHL_INT0_SL_PHY_ENABLE_MSK)
|
||||
& (~CHL_INT0_NOT_RDY_MSK));
|
||||
}
|
||||
irq_msk &= ~(1 << phy_no);
|
||||
phy_no++;
|
||||
@ -2906,7 +2972,7 @@ static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba,
|
||||
val = hisi_sas_read32(hisi_hba, ecc_error->reg);
|
||||
val &= ecc_error->msk;
|
||||
val >>= ecc_error->shift;
|
||||
dev_warn(dev, ecc_error->msg, irq_value, val);
|
||||
dev_err(dev, ecc_error->msg, irq_value, val);
|
||||
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
|
||||
}
|
||||
}
|
||||
@ -3015,12 +3081,12 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
|
||||
for (; sub->msk || sub->msg; sub++) {
|
||||
if (!(err_value & sub->msk))
|
||||
continue;
|
||||
dev_warn(dev, "%s (0x%x) found!\n",
|
||||
dev_err(dev, "%s (0x%x) found!\n",
|
||||
sub->msg, irq_value);
|
||||
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
|
||||
}
|
||||
} else {
|
||||
dev_warn(dev, "%s (0x%x) found!\n",
|
||||
dev_err(dev, "%s (0x%x) found!\n",
|
||||
axi_error->msg, irq_value);
|
||||
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
|
||||
}
|
||||
@ -3206,7 +3272,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
|
||||
phy->identify.device_type = SAS_SATA_DEV;
|
||||
phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
|
||||
phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
|
||||
queue_work(hisi_hba->wq, &phy->phyup_ws);
|
||||
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
|
||||
|
||||
end:
|
||||
hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp);
|
||||
@ -3392,7 +3458,7 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
|
||||
|
||||
udelay(10);
|
||||
if (cnt++ > 10) {
|
||||
dev_info(dev, "wait axi bus state to idle timeout!\n");
|
||||
dev_err(dev, "wait axi bus state to idle timeout!\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
@ -3408,6 +3474,44 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type,
|
||||
u8 reg_index, u8 reg_count, u8 *write_data)
|
||||
{
|
||||
struct device *dev = hisi_hba->dev;
|
||||
int phy_no, count;
|
||||
|
||||
if (!hisi_hba->sgpio_regs)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
switch (reg_type) {
|
||||
case SAS_GPIO_REG_TX:
|
||||
count = reg_count * 4;
|
||||
count = min(count, hisi_hba->n_phy);
|
||||
|
||||
for (phy_no = 0; phy_no < count; phy_no++) {
|
||||
/*
|
||||
* GPIO_TX[n] register has the highest numbered drive
|
||||
* of the four in the first byte and the lowest
|
||||
* numbered drive in the fourth byte.
|
||||
* See SFF-8485 Rev. 0.7 Table 24.
|
||||
*/
|
||||
void __iomem *reg_addr = hisi_hba->sgpio_regs +
|
||||
reg_index * 4 + phy_no;
|
||||
int data_idx = phy_no + 3 - (phy_no % 4) * 2;
|
||||
|
||||
writeb(write_data[data_idx], reg_addr);
|
||||
}
|
||||
|
||||
break;
|
||||
default:
|
||||
dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
|
||||
reg_type);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct hisi_sas_hw hisi_sas_v2_hw = {
|
||||
.hw_init = hisi_sas_v2_init,
|
||||
.setup_itct = setup_itct_v2_hw,
|
||||
@ -3415,6 +3519,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
|
||||
.alloc_dev = alloc_dev_quirk_v2_hw,
|
||||
.sl_notify = sl_notify_v2_hw,
|
||||
.get_wideport_bitmap = get_wideport_bitmap_v2_hw,
|
||||
.clear_itct = clear_itct_v2_hw,
|
||||
.free_device = free_device_v2_hw,
|
||||
.prep_smp = prep_smp_v2_hw,
|
||||
.prep_ssp = prep_ssp_v2_hw,
|
||||
@ -3434,6 +3539,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
|
||||
.complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
|
||||
.soft_reset = soft_reset_v2_hw,
|
||||
.get_phys_state = get_phys_state_v2_hw,
|
||||
.write_gpio = write_gpio_v2_hw,
|
||||
};
|
||||
|
||||
static int hisi_sas_v2_probe(struct platform_device *pdev)
|
||||
|
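write_gpio_v2_hw above has to reorder the caller's byte stream because each 32-bit GPIO_TX register carries its four drives most-significant-first (SFF-8485 Rev. 0.7 Table 24). A quick userspace check of the index arithmetic from that hunk, printed per phy; no MMIO is involved here.

#include <stdio.h>

int main(void)
{
	/* For each phy, data_idx picks the source byte in the caller's
	 * array: phys 0..3 map to bytes 3,2,1,0 of the first register,
	 * phys 4..7 to bytes 7,6,5,4 of the second, and so on. */
	for (int phy_no = 0; phy_no < 8; phy_no++) {
		int data_idx = phy_no + 3 - (phy_no % 4) * 2;

		printf("phy %d <- write_data[%d]\n", phy_no, data_idx);
	}
	return 0;
}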
@ -140,6 +140,7 @@
#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
#define STP_LINK_TIMER (PORT_BASE + 0x120)
#define STP_LINK_TIMEOUT_STATE (PORT_BASE + 0x124)
#define CON_CFG_DRIVER (PORT_BASE + 0x130)
#define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134)
#define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138)
@ -165,6 +166,8 @@
#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
#define CHL_INT2 (PORT_BASE + 0x1bc)
#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
#define CHL_INT2_STP_LINK_TIMEOUT_OFF 31
#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
@ -204,6 +207,13 @@
#define AM_ROB_ECC_MULBIT_ERR_ADDR_OFF 8
#define AM_ROB_ECC_MULBIT_ERR_ADDR_MSK (0xff << AM_ROB_ECC_MULBIT_ERR_ADDR_OFF)

/* RAS registers need init */
#define RAS_BASE (0x6000)
#define SAS_RAS_INTR0 (RAS_BASE)
#define SAS_RAS_INTR1 (RAS_BASE + 0x04)
#define SAS_RAS_INTR0_MASK (RAS_BASE + 0x08)
#define SAS_RAS_INTR1_MASK (RAS_BASE + 0x0c)

/* HW dma structures */
/* Delivery queue header */
/* dw0 */
@ -422,7 +432,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff87ffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
@ -496,6 +506,10 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)

hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
upper_32_bits(hisi_hba->initial_fis_dma));

/* RAS registers init */
hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0);
hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0);
}

static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
@ -588,7 +602,7 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
(0x1ULL << ITCT_HDR_RTOLT_OFF));
}

static void free_device_v3_hw(struct hisi_hba *hisi_hba,
static void clear_itct_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
DECLARE_COMPLETION_ONSTACK(completion);
@ -1033,7 +1047,7 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
dw1 |= 1 << CMD_HDR_RESET_OFF;

dw1 |= (hisi_sas_get_ata_protocol(
task->ata_task.fis.command, task->data_dir))
&task->ata_task.fis, task->data_dir))
<< CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;

@ -1138,7 +1152,7 @@ static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
struct dev_to_host_fis *fis;
u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};

dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate);
initial_fis = &hisi_hba->initial_fis[phy_no];
fis = &initial_fis->fis;
sas_phy->oob_mode = SATA_OOB_MODE;
@ -1181,7 +1195,7 @@ static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)

phy->port_id = port_id;
phy->phy_attached = 1;
queue_work(hisi_hba->wq, &phy->phyup_ws);
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);

end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
@ -1322,7 +1336,7 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
if (!(irq_value1 & error->irq_msk))
continue;

dev_warn(dev, "%s error (phy%d 0x%x) found!\n",
dev_err(dev, "%s error (phy%d 0x%x) found!\n",
error->msg, phy_no, irq_value1);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
@ -1331,9 +1345,31 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
CHL_INT1, irq_value1);
}

if (irq_msk & (8 << (phy_no * 4)) && irq_value2)
if (irq_msk & (8 << (phy_no * 4)) && irq_value2) {
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];

if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
dev_warn(dev, "phy%d identify timeout\n",
phy_no);
hisi_sas_notify_phy_event(phy,
HISI_PHYE_LINK_RESET);

}

if (irq_value2 & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
u32 reg_value = hisi_sas_phy_read32(hisi_hba,
phy_no, STP_LINK_TIMEOUT_STATE);

dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
phy_no, reg_value);
if (reg_value & BIT(4))
hisi_sas_notify_phy_event(phy,
HISI_PHYE_LINK_RESET);
}

hisi_sas_phy_write32(hisi_hba, phy_no,
CHL_INT2, irq_value2);
}

if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
@ -1432,12 +1468,12 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
if (!(err_value & sub->msk))
continue;

dev_warn(dev, "%s error (0x%x) found!\n",
dev_err(dev, "%s error (0x%x) found!\n",
sub->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
} else {
dev_warn(dev, "%s error (0x%x) found!\n",
dev_err(dev, "%s error (0x%x) found!\n",
error->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
@ -1542,6 +1578,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
memset(ts, 0, sizeof(*ts));
ts->resp = SAS_TASK_COMPLETE;
if (unlikely(aborted)) {
dev_dbg(dev, "slot complete: task(%p) aborted\n", task);
ts->stat = SAS_ABORTED_TASK;
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
@ -1583,7 +1620,18 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)

/* check for erroneous completion */
if ((complete_hdr->dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
u32 *error_info = hisi_sas_status_buf_addr_mem(slot);

slot_err_v3_hw(hisi_hba, task, slot);
if (ts->stat != SAS_DATA_UNDERRUN)
dev_info(dev, "erroneous completion iptt=%d task=%p "
"CQ hdr: 0x%x 0x%x 0x%x 0x%x "
"Error info: 0x%x 0x%x 0x%x 0x%x\n",
slot->idx, task,
complete_hdr->dw0, complete_hdr->dw1,
complete_hdr->act, complete_hdr->dw3,
error_info[0], error_info[1],
error_info[2], error_info[3]);
if (unlikely(slot->abort))
return ts->stat;
goto out;
@ -1628,7 +1676,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}

if (!slot->port->port_attached) {
dev_err(dev, "slot complete: port %d has removed\n",
dev_warn(dev, "slot complete: port %d has removed\n",
slot->port->sas_port.id);
ts->stat = SAS_PHY_DOWN;
}
@ -1653,9 +1701,8 @@ static void cq_tasklet_v3_hw(unsigned long val)
struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
struct hisi_hba *hisi_hba = cq->hisi_hba;
struct hisi_sas_slot *slot;
struct hisi_sas_itct *itct;
struct hisi_sas_complete_v3_hdr *complete_queue;
u32 rd_point = cq->rd_point, wr_point, dev_id;
u32 rd_point = cq->rd_point, wr_point;
int queue = cq->id;
struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

@ -1671,38 +1718,11 @@ static void cq_tasklet_v3_hw(unsigned long val)

complete_hdr = &complete_queue[rd_point];

/* Check for NCQ completion */
if (complete_hdr->act) {
u32 act_tmp = complete_hdr->act;
int ncq_tag_count = ffs(act_tmp);

dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
CMPLT_HDR_DEV_ID_OFF;
itct = &hisi_hba->itct[dev_id];

/* The NCQ tags are held in the itct header */
while (ncq_tag_count) {
__le64 *ncq_tag = &itct->qw4_15[0];

ncq_tag_count -= 1;
iptt = (ncq_tag[ncq_tag_count / 5]
>> (ncq_tag_count % 5) * 12) & 0xfff;

slot = &hisi_hba->slot_info[iptt];
slot->cmplt_queue_slot = rd_point;
slot->cmplt_queue = queue;
slot_complete_v3_hw(hisi_hba, slot);

act_tmp &= ~(1 << ncq_tag_count);
ncq_tag_count = ffs(act_tmp);
}
} else {
iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
slot = &hisi_hba->slot_info[iptt];
slot->cmplt_queue_slot = rd_point;
slot->cmplt_queue = queue;
slot_complete_v3_hw(hisi_hba, slot);
}
iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
slot = &hisi_hba->slot_info[iptt];
slot->cmplt_queue_slot = rd_point;
slot->cmplt_queue = queue;
slot_complete_v3_hw(hisi_hba, slot);

if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
rd_point = 0;
@ -1951,7 +1971,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
|
||||
.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW,
|
||||
.get_wideport_bitmap = get_wideport_bitmap_v3_hw,
|
||||
.complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
|
||||
.free_device = free_device_v3_hw,
|
||||
.clear_itct = clear_itct_v3_hw,
|
||||
.sl_notify = sl_notify_v3_hw,
|
||||
.prep_ssp = prep_ssp_v3_hw,
|
||||
.prep_smp = prep_smp_v3_hw,
|
||||
@ -2157,21 +2177,243 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
|
||||
scsi_host_put(shost);
|
||||
}
|
||||
|
||||
static const struct hisi_sas_hw_error sas_ras_intr0_nfe[] = {
|
||||
{ .irq_msk = BIT(19), .msg = "HILINK_INT" },
|
||||
{ .irq_msk = BIT(20), .msg = "HILINK_PLL0_OUT_OF_LOCK" },
|
||||
{ .irq_msk = BIT(21), .msg = "HILINK_PLL1_OUT_OF_LOCK" },
|
||||
{ .irq_msk = BIT(22), .msg = "HILINK_LOSS_OF_REFCLK0" },
|
||||
{ .irq_msk = BIT(23), .msg = "HILINK_LOSS_OF_REFCLK1" },
|
||||
{ .irq_msk = BIT(24), .msg = "DMAC0_TX_POISON" },
|
||||
{ .irq_msk = BIT(25), .msg = "DMAC1_TX_POISON" },
|
||||
{ .irq_msk = BIT(26), .msg = "DMAC2_TX_POISON" },
|
||||
{ .irq_msk = BIT(27), .msg = "DMAC3_TX_POISON" },
|
||||
{ .irq_msk = BIT(28), .msg = "DMAC4_TX_POISON" },
|
||||
{ .irq_msk = BIT(29), .msg = "DMAC5_TX_POISON" },
|
||||
{ .irq_msk = BIT(30), .msg = "DMAC6_TX_POISON" },
|
||||
{ .irq_msk = BIT(31), .msg = "DMAC7_TX_POISON" },
|
||||
};
|
||||
|
||||
static const struct hisi_sas_hw_error sas_ras_intr1_nfe[] = {
|
||||
{ .irq_msk = BIT(0), .msg = "RXM_CFG_MEM3_ECC2B_INTR" },
|
||||
{ .irq_msk = BIT(1), .msg = "RXM_CFG_MEM2_ECC2B_INTR" },
|
||||
{ .irq_msk = BIT(2), .msg = "RXM_CFG_MEM1_ECC2B_INTR" },
|
||||
{ .irq_msk = BIT(3), .msg = "RXM_CFG_MEM0_ECC2B_INTR" },
|
||||
{ .irq_msk = BIT(4), .msg = "HGC_CQE_ECC2B_INTR" },
|
||||
{ .irq_msk = BIT(5), .msg = "LM_CFG_IOSTL_ECC2B_INTR" },
|
||||
{ .irq_msk = BIT(6), .msg = "LM_CFG_ITCTL_ECC2B_INTR" },
|
||||
{ .irq_msk = BIT(7), .msg = "HGC_ITCT_ECC2B_INTR" },
|
||||
{ .irq_msk = BIT(8), .msg = "HGC_IOST_ECC2B_INTR" },
|
||||
{ .irq_msk = BIT(9), .msg = "HGC_DQE_ECC2B_INTR" },
|
||||
{ .irq_msk = BIT(10), .msg = "DMAC0_RAM_ECC2B_INTR" },
|
||||
{ .irq_msk = BIT(11), .msg = "DMAC1_RAM_ECC2B_INTR" },
|
||||
{ .irq_msk = BIT(12), .msg = "DMAC2_RAM_ECC2B_INTR" },
|
||||
{ .irq_msk = BIT(13), .msg = "DMAC3_RAM_ECC2B_INTR" },
|
||||
{ .irq_msk = BIT(14), .msg = "DMAC4_RAM_ECC2B_INTR" },
|
||||
{ .irq_msk = BIT(15), .msg = "DMAC5_RAM_ECC2B_INTR" },
|
||||
{ .irq_msk = BIT(16), .msg = "DMAC6_RAM_ECC2B_INTR" },
|
||||
{ .irq_msk = BIT(17), .msg = "DMAC7_RAM_ECC2B_INTR" },
|
||||
{ .irq_msk = BIT(18), .msg = "OOO_RAM_ECC2B_INTR" },
|
||||
{ .irq_msk = BIT(20), .msg = "HGC_DQE_POISON_INTR" },
|
||||
{ .irq_msk = BIT(21), .msg = "HGC_IOST_POISON_INTR" },
|
||||
{ .irq_msk = BIT(22), .msg = "HGC_ITCT_POISON_INTR" },
|
||||
{ .irq_msk = BIT(23), .msg = "HGC_ITCT_NCQ_POISON_INTR" },
|
||||
{ .irq_msk = BIT(24), .msg = "DMAC0_RX_POISON" },
|
||||
{ .irq_msk = BIT(25), .msg = "DMAC1_RX_POISON" },
|
||||
{ .irq_msk = BIT(26), .msg = "DMAC2_RX_POISON" },
|
||||
{ .irq_msk = BIT(27), .msg = "DMAC3_RX_POISON" },
|
||||
{ .irq_msk = BIT(28), .msg = "DMAC4_RX_POISON" },
|
||||
{ .irq_msk = BIT(29), .msg = "DMAC5_RX_POISON" },
|
||||
{ .irq_msk = BIT(30), .msg = "DMAC6_RX_POISON" },
|
||||
{ .irq_msk = BIT(31), .msg = "DMAC7_RX_POISON" },
|
||||
};
|
||||
|
||||
static bool process_non_fatal_error_v3_hw(struct hisi_hba *hisi_hba)
|
||||
{
|
||||
struct device *dev = hisi_hba->dev;
|
||||
const struct hisi_sas_hw_error *ras_error;
|
||||
bool need_reset = false;
|
||||
u32 irq_value;
|
||||
int i;
|
||||
|
||||
irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR0);
|
||||
for (i = 0; i < ARRAY_SIZE(sas_ras_intr0_nfe); i++) {
|
||||
ras_error = &sas_ras_intr0_nfe[i];
|
||||
if (ras_error->irq_msk & irq_value) {
|
||||
dev_warn(dev, "SAS_RAS_INTR0: %s(irq_value=0x%x) found.\n",
|
||||
ras_error->msg, irq_value);
|
||||
need_reset = true;
|
||||
}
|
||||
}
|
||||
hisi_sas_write32(hisi_hba, SAS_RAS_INTR0, irq_value);
|
||||
|
||||
irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR1);
|
||||
for (i = 0; i < ARRAY_SIZE(sas_ras_intr1_nfe); i++) {
|
||||
ras_error = &sas_ras_intr1_nfe[i];
|
||||
if (ras_error->irq_msk & irq_value) {
|
||||
dev_warn(dev, "SAS_RAS_INTR1: %s(irq_value=0x%x) found.\n",
|
||||
ras_error->msg, irq_value);
|
||||
need_reset = true;
|
||||
}
|
||||
}
|
||||
hisi_sas_write32(hisi_hba, SAS_RAS_INTR1, irq_value);
|
||||
|
||||
return need_reset;
|
||||
}
|
||||
|
||||
static pci_ers_result_t hisi_sas_error_detected_v3_hw(struct pci_dev *pdev,
|
||||
pci_channel_state_t state)
|
||||
{
|
||||
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
|
||||
struct hisi_hba *hisi_hba = sha->lldd_ha;
|
||||
struct device *dev = hisi_hba->dev;
|
||||
|
||||
dev_info(dev, "PCI error: detected callback, state(%d)!!\n", state);
|
||||
if (state == pci_channel_io_perm_failure)
|
||||
return PCI_ERS_RESULT_DISCONNECT;
|
||||
|
||||
if (process_non_fatal_error_v3_hw(hisi_hba))
|
||||
return PCI_ERS_RESULT_NEED_RESET;
|
||||
|
||||
return PCI_ERS_RESULT_CAN_RECOVER;
|
||||
}
|
||||
|
||||
static pci_ers_result_t hisi_sas_mmio_enabled_v3_hw(struct pci_dev *pdev)
|
||||
{
|
||||
return PCI_ERS_RESULT_RECOVERED;
|
||||
}
|
||||
|
||||
static pci_ers_result_t hisi_sas_slot_reset_v3_hw(struct pci_dev *pdev)
|
||||
{
|
||||
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
|
||||
struct hisi_hba *hisi_hba = sha->lldd_ha;
|
||||
struct device *dev = hisi_hba->dev;
|
||||
HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
|
||||
|
||||
dev_info(dev, "PCI error: slot reset callback!!\n");
|
||||
queue_work(hisi_hba->wq, &r.work);
|
||||
wait_for_completion(r.completion);
|
||||
if (r.done)
|
||||
return PCI_ERS_RESULT_RECOVERED;
|
||||
|
||||
return PCI_ERS_RESULT_DISCONNECT;
|
||||
}
|
||||
|
||||
enum {
|
||||
/* instances of the controller */
|
||||
hip08,
|
||||
};
|
||||
|
||||
static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
|
||||
{
|
||||
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
|
||||
struct hisi_hba *hisi_hba = sha->lldd_ha;
|
||||
struct device *dev = hisi_hba->dev;
|
||||
struct Scsi_Host *shost = hisi_hba->shost;
|
||||
u32 device_state, status;
|
||||
int rc;
|
||||
u32 reg_val;
|
||||
unsigned long flags;
|
||||
|
||||
if (!pdev->pm_cap) {
|
||||
dev_err(dev, "PCI PM not supported\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
|
||||
scsi_block_requests(shost);
|
||||
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
|
||||
flush_workqueue(hisi_hba->wq);
|
||||
/* disable DQ/PHY/bus */
|
||||
interrupt_disable_v3_hw(hisi_hba);
|
||||
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
|
||||
hisi_sas_kill_tasklets(hisi_hba);
|
||||
|
||||
hisi_sas_stop_phys(hisi_hba);
|
||||
|
||||
reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
|
||||
AM_CTRL_GLOBAL);
|
||||
reg_val |= 0x1;
|
||||
hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
|
||||
AM_CTRL_GLOBAL, reg_val);
|
||||
|
||||
/* wait until bus idle */
|
||||
rc = readl_poll_timeout(hisi_hba->regs + AXI_MASTER_CFG_BASE +
|
||||
AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100);
|
||||
if (rc) {
|
||||
dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
|
||||
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
|
||||
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
|
||||
scsi_unblock_requests(shost);
|
||||
return rc;
|
||||
}
|
||||
|
||||
hisi_sas_init_mem(hisi_hba);
|
||||
|
||||
device_state = pci_choose_state(pdev, state);
|
||||
dev_warn(dev, "entering operating state [D%d]\n",
|
||||
device_state);
|
||||
pci_save_state(pdev);
|
||||
pci_disable_device(pdev);
|
||||
pci_set_power_state(pdev, device_state);
|
||||
|
||||
spin_lock_irqsave(&hisi_hba->lock, flags);
|
||||
hisi_sas_release_tasks(hisi_hba);
|
||||
spin_unlock_irqrestore(&hisi_hba->lock, flags);
|
||||
|
||||
sas_suspend_ha(sha);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hisi_sas_v3_resume(struct pci_dev *pdev)
|
||||
{
|
||||
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
|
||||
struct hisi_hba *hisi_hba = sha->lldd_ha;
|
||||
struct Scsi_Host *shost = hisi_hba->shost;
|
||||
struct device *dev = hisi_hba->dev;
|
||||
unsigned int rc;
|
||||
u32 device_state = pdev->current_state;
|
||||
|
||||
dev_warn(dev, "resuming from operating state [D%d]\n",
|
||||
device_state);
|
||||
pci_set_power_state(pdev, PCI_D0);
|
||||
pci_enable_wake(pdev, PCI_D0, 0);
|
||||
pci_restore_state(pdev);
|
||||
rc = pci_enable_device(pdev);
|
||||
if (rc)
|
||||
dev_err(dev, "enable device failed during resume (%d)\n", rc);
|
||||
|
||||
pci_set_master(pdev);
|
||||
scsi_unblock_requests(shost);
|
||||
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
|
||||
|
||||
sas_prep_resume_ha(sha);
|
||||
init_reg_v3_hw(hisi_hba);
|
||||
hisi_hba->hw->phys_init(hisi_hba);
|
||||
sas_resume_ha(sha);
|
||||
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct pci_device_id sas_v3_pci_table[] = {
|
||||
{ PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct pci_error_handlers hisi_sas_err_handler = {
|
||||
.error_detected = hisi_sas_error_detected_v3_hw,
|
||||
.mmio_enabled = hisi_sas_mmio_enabled_v3_hw,
|
||||
.slot_reset = hisi_sas_slot_reset_v3_hw,
|
||||
};
|
||||
|
||||
static struct pci_driver sas_v3_pci_driver = {
|
||||
.name = DRV_NAME,
|
||||
.id_table = sas_v3_pci_table,
|
||||
.probe = hisi_sas_v3_probe,
|
||||
.remove = hisi_sas_v3_remove,
|
||||
.suspend = hisi_sas_v3_suspend,
|
||||
.resume = hisi_sas_v3_resume,
|
||||
.err_handler = &hisi_sas_err_handler,
|
||||
};
|
||||
|
||||
module_pci_driver(sas_v3_pci_driver);
|
||||
|
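Note: the suspend path above parks the controller and then polls the AXI bus until it reports idle via readl_poll_timeout(). A minimal sketch of how that iopoll helper is used, with a hypothetical register offset and expected value (only the call signature is taken from the hunk above):

#include <linux/iopoll.h>

/* Poll a hypothetical STATUS_REG until it reads DONE_VALUE, checking
 * roughly every 10 us and giving up after 100 us total; returns 0 on
 * success or -ETIMEDOUT, with the last read left in 'status'. */
static int wait_for_idle(void __iomem *regs)
{
	u32 status;

	return readl_poll_timeout(regs + STATUS_REG, status,
				  status == DONE_VALUE, 10, 100);
}
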
@@ -318,6 +318,9 @@ static void scsi_host_dev_release(struct device *dev)

scsi_proc_hostdir_rm(shost->hostt);

/* Wait for functions invoked through call_rcu(&shost->rcu, ...) */
rcu_barrier();

if (shost->tmf_work_q)
destroy_workqueue(shost->tmf_work_q);
if (shost->ehandler)
@@ -325,6 +328,8 @@ static void scsi_host_dev_release(struct device *dev)
if (shost->work_q)
destroy_workqueue(shost->work_q);

destroy_rcu_head(&shost->rcu);

if (shost->shost_state == SHOST_CREATED) {
/*
* Free the shost_dev device name here if scsi_host_alloc()
@@ -399,6 +404,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
INIT_LIST_HEAD(&shost->starved_list);
init_waitqueue_head(&shost->host_wait);
mutex_init(&shost->scan_mutex);
init_rcu_head(&shost->rcu);

index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
if (index < 0)

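Note: the hunks above bracket the Scsi_Host lifetime with init_rcu_head()/destroy_rcu_head() and add an rcu_barrier() before teardown, so any callbacks queued with call_rcu() on shost->rcu have run before the host memory is freed. A minimal sketch of that general pattern, with a hypothetical object type:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct widget {
	struct rcu_head rcu;
	int payload;
};

static void widget_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct widget, rcu));
}

static void widget_release(struct widget *w)
{
	/* Deferred free: readers under rcu_read_lock() stay safe. */
	call_rcu(&w->rcu, widget_free_rcu);
}

static void widget_teardown(void)
{
	/* Wait for every callback queued with call_rcu() to finish. */
	rcu_barrier();
}
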
@@ -3518,7 +3518,7 @@ out:

if (rc != IO_OK)
hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
"Error, could not get enclosure information\n");
"Error, could not get enclosure information");
}

static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
@@ -4619,21 +4619,13 @@ sglist_finished:
return 0;
}

#define BUFLEN 128
static inline void warn_zero_length_transfer(struct ctlr_info *h,
u8 *cdb, int cdb_len,
const char *func)
{
char buf[BUFLEN];
int outlen;
int i;

outlen = scnprintf(buf, BUFLEN,
"%s: Blocking zero-length request: CDB:", func);
for (i = 0; i < cdb_len; i++)
outlen += scnprintf(buf+outlen, BUFLEN - outlen,
"%02hhx", cdb[i]);
dev_warn(&h->pdev->dev, "%s\n", buf);
dev_warn(&h->pdev->dev,
"%s: Blocking zero-length request: CDB:%*phN\n",
func, cdb_len, cdb);
}

#define IO_ACCEL_INELIGIBLE 1
@@ -8223,8 +8215,6 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h)

if (!device)
continue;
if (!device->scsi3addr)
continue;
if (!hpsa_vpd_page_supported(h, device->scsi3addr,
HPSA_VPD_LV_IOACCEL_STATUS))
continue;

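Note: the warn_zero_length_transfer() rewrite above drops a hand-rolled hex-dump loop in favour of the kernel's %*phN printk extension, which prints a buffer as contiguous hex bytes, taking the byte count as the field width. A minimal sketch (the CDB contents here are made up for illustration):

u8 cdb[6] = { 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00 };

/* Prints "CDB:2a0000000000" -- %*phN consumes (int length, pointer). */
pr_info("CDB:%*phN\n", (int)sizeof(cdb), cdb);
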
@@ -181,7 +181,7 @@ static void ibmvfc_trc_start(struct ibmvfc_event *evt)
break;
default:
break;
};
}
}

/**
@@ -220,7 +220,7 @@ static void ibmvfc_trc_end(struct ibmvfc_event *evt)
default:
break;

};
}
}

#else
@@ -464,7 +464,7 @@ static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
default:
vhost->state = state;
break;
};
}

return rc;
}
@@ -500,7 +500,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
break;
default:
break;
};
}
break;
case IBMVFC_HOST_ACTION_TGT_INIT:
if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
@@ -515,7 +515,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
default:
vhost->action = action;
break;
};
}
break;
case IBMVFC_HOST_ACTION_LOGO:
case IBMVFC_HOST_ACTION_QUERY_TGTS:
@@ -526,7 +526,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
default:
vhost->action = action;
break;
};
}
}

/**
@@ -1601,7 +1601,7 @@ static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
case IBMVFC_ACTIVE:
result = 0;
break;
};
}

return result;
}
@@ -1856,7 +1856,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
break;
default:
return -ENOTSUPP;
};
}

if (port_id == -1)
return -EINVAL;
@@ -2661,7 +2661,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
vhost->delay_init = 1;
__ibmvfc_reset_host(vhost);
break;
};
}

break;
case IBMVFC_AE_LINK_UP:
@@ -2715,7 +2715,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
default:
dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
break;
};
}
}

/**
@@ -3351,7 +3351,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
rsp->status, rsp->error, status);
break;
};
}

kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -3451,7 +3451,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type,
ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status);
break;
};
}

kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -3522,7 +3522,7 @@ static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
default:
tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
break;
};
}

if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT)
ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
@@ -3626,7 +3626,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
ibmvfc_get_fc_type(fc_reason), fc_reason,
ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
break;
};
}

kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -3838,7 +3838,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)),
rsp->fc_explain, status);
break;
};
}

kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -4236,7 +4236,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
case IBMVFC_HOST_ACTION_REENABLE:
default:
break;
};
}

return 1;
}
@@ -4464,7 +4464,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
break;
default:
break;
};
}

spin_unlock_irqrestore(vhost->host->host_lock, flags);
}

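Note: every ibmvfc hunk above makes the same one-character fix, removing a stray semicolon after the closing brace of a switch statement. The semicolon is a harmless empty statement, but it is redundant and tools such as checkpatch flag it. Schematically, before and after:

/* before: the close brace of the switch carries a stray ';' */
switch (vhost->state) {
default:
	break;
};

/* after: plain closing brace, no empty statement */
switch (vhost->state) {
default:
	break;
}
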
@ -122,7 +122,7 @@ static bool connection_broken(struct scsi_info *vscsi)
|
||||
cpu_to_be64(buffer[MSG_HI]),
|
||||
cpu_to_be64(buffer[MSG_LOW]));
|
||||
|
||||
pr_debug("connection_broken: rc %ld\n", h_return_code);
|
||||
dev_dbg(&vscsi->dev, "Connection_broken: rc %ld\n", h_return_code);
|
||||
|
||||
if (h_return_code == H_CLOSED)
|
||||
rc = true;
|
||||
@ -210,7 +210,7 @@ static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
|
||||
}
|
||||
} while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS);
|
||||
|
||||
pr_debug("Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);
|
||||
dev_dbg(&vscsi->dev, "Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);
|
||||
|
||||
return rc;
|
||||
}
|
||||
@ -291,9 +291,9 @@ static long ibmvscsis_free_command_q(struct scsi_info *vscsi)
|
||||
ibmvscsis_delete_client_info(vscsi, false);
|
||||
}
|
||||
|
||||
pr_debug("free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
|
||||
vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
|
||||
vscsi->phyp_acr_state);
|
||||
dev_dbg(&vscsi->dev, "free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
|
||||
vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
|
||||
vscsi->phyp_acr_state);
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
@ -428,8 +428,8 @@ static void ibmvscsis_disconnect(struct work_struct *work)
|
||||
vscsi->flags |= DISCONNECT_SCHEDULED;
|
||||
vscsi->flags &= ~SCHEDULE_DISCONNECT;
|
||||
|
||||
pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags,
|
||||
vscsi->state);
|
||||
dev_dbg(&vscsi->dev, "disconnect: flags 0x%x, state 0x%hx\n",
|
||||
vscsi->flags, vscsi->state);
|
||||
|
||||
/*
|
||||
* check which state we are in and see if we
|
||||
@ -540,13 +540,14 @@ static void ibmvscsis_disconnect(struct work_struct *work)
|
||||
}
|
||||
|
||||
if (wait_idle) {
|
||||
pr_debug("disconnect start wait, active %d, sched %d\n",
|
||||
(int)list_empty(&vscsi->active_q),
|
||||
(int)list_empty(&vscsi->schedule_q));
|
||||
dev_dbg(&vscsi->dev, "disconnect start wait, active %d, sched %d\n",
|
||||
(int)list_empty(&vscsi->active_q),
|
||||
(int)list_empty(&vscsi->schedule_q));
|
||||
if (!list_empty(&vscsi->active_q) ||
|
||||
!list_empty(&vscsi->schedule_q)) {
|
||||
vscsi->flags |= WAIT_FOR_IDLE;
|
||||
pr_debug("disconnect flags 0x%x\n", vscsi->flags);
|
||||
dev_dbg(&vscsi->dev, "disconnect flags 0x%x\n",
|
||||
vscsi->flags);
|
||||
/*
|
||||
* This routine is can not be called with the interrupt
|
||||
* lock held.
|
||||
@ -555,7 +556,7 @@ static void ibmvscsis_disconnect(struct work_struct *work)
|
||||
wait_for_completion(&vscsi->wait_idle);
|
||||
spin_lock_bh(&vscsi->intr_lock);
|
||||
}
|
||||
pr_debug("disconnect stop wait\n");
|
||||
dev_dbg(&vscsi->dev, "disconnect stop wait\n");
|
||||
|
||||
ibmvscsis_adapter_idle(vscsi);
|
||||
}
|
||||
@ -597,8 +598,8 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
|
||||
|
||||
vscsi->flags |= flag_bits;
|
||||
|
||||
pr_debug("post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
|
||||
new_state, flag_bits, vscsi->flags, vscsi->state);
|
||||
dev_dbg(&vscsi->dev, "post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
|
||||
new_state, flag_bits, vscsi->flags, vscsi->state);
|
||||
|
||||
if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) {
|
||||
vscsi->flags |= SCHEDULE_DISCONNECT;
|
||||
@ -648,8 +649,8 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
|
||||
}
|
||||
}
|
||||
|
||||
pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
|
||||
vscsi->flags, vscsi->new_state);
|
||||
dev_dbg(&vscsi->dev, "Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
|
||||
vscsi->flags, vscsi->new_state);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -724,7 +725,8 @@ static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
|
||||
break;
|
||||
|
||||
case H_CLOSED:
|
||||
pr_warn("init_msg: failed to send, rc %ld\n", rc);
|
||||
dev_warn(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
|
||||
rc);
|
||||
rc = 0;
|
||||
break;
|
||||
}
|
||||
@ -768,7 +770,7 @@ static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
|
||||
{
|
||||
long rc = ADAPT_SUCCESS;
|
||||
|
||||
pr_debug("init_msg: state 0x%hx\n", vscsi->state);
|
||||
dev_dbg(&vscsi->dev, "init_msg: state 0x%hx\n", vscsi->state);
|
||||
|
||||
rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
|
||||
(u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
|
||||
@ -776,10 +778,10 @@ static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
|
||||
if (rc == H_SUCCESS) {
|
||||
vscsi->client_data.partition_number =
|
||||
be64_to_cpu(*(u64 *)vscsi->map_buf);
|
||||
pr_debug("init_msg, part num %d\n",
|
||||
vscsi->client_data.partition_number);
|
||||
dev_dbg(&vscsi->dev, "init_msg, part num %d\n",
|
||||
vscsi->client_data.partition_number);
|
||||
} else {
|
||||
pr_debug("init_msg h_vioctl rc %ld\n", rc);
|
||||
dev_dbg(&vscsi->dev, "init_msg h_vioctl rc %ld\n", rc);
|
||||
rc = ADAPT_SUCCESS;
|
||||
}
|
||||
|
||||
@ -813,7 +815,8 @@ static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
|
||||
if (rc == H_SUCCESS)
|
||||
vscsi->flags |= PREP_FOR_SUSPEND_ENABLED;
|
||||
else if (rc != H_NOT_FOUND)
|
||||
pr_err("Error from Enable Prepare for Suspend: %ld\n", rc);
|
||||
dev_err(&vscsi->dev, "Error from Enable Prepare for Suspend: %ld\n",
|
||||
rc);
|
||||
|
||||
vscsi->flags &= PRESERVE_FLAG_FIELDS;
|
||||
vscsi->rsp_q_timer.timer_pops = 0;
|
||||
@ -822,8 +825,8 @@ static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
|
||||
|
||||
rc = vio_enable_interrupts(vscsi->dma_dev);
|
||||
if (rc) {
|
||||
pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n",
|
||||
rc);
|
||||
dev_warn(&vscsi->dev, "establish_new_q: failed to enable interrupts, rc %ld\n",
|
||||
rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -883,7 +886,7 @@ static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
|
||||
int bytes;
|
||||
long rc = ADAPT_SUCCESS;
|
||||
|
||||
pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
|
||||
dev_dbg(&vscsi->dev, "reset_queue: flags 0x%x\n", vscsi->flags);
|
||||
|
||||
/* don't reset, the client did it for us */
|
||||
if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
|
||||
@ -906,7 +909,8 @@ static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
|
||||
}
|
||||
|
||||
if (rc != ADAPT_SUCCESS) {
|
||||
pr_debug("reset_queue: reg_crq rc %ld\n", rc);
|
||||
dev_dbg(&vscsi->dev, "reset_queue: reg_crq rc %ld\n",
|
||||
rc);
|
||||
|
||||
vscsi->state = ERR_DISCONNECTED;
|
||||
vscsi->flags |= RESPONSE_Q_DOWN;
|
||||
@ -985,14 +989,15 @@ static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
|
||||
/* See if there is a Resume event in the queue */
|
||||
crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
|
||||
|
||||
pr_debug("ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
|
||||
vscsi->flags, vscsi->state, (int)crq->valid);
|
||||
dev_dbg(&vscsi->dev, "ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
|
||||
vscsi->flags, vscsi->state, (int)crq->valid);
|
||||
|
||||
if (!(vscsi->flags & PREP_FOR_SUSPEND_ABORTED) && !(crq->valid)) {
|
||||
rc = h_vioctl(vscsi->dds.unit_id, H_READY_FOR_SUSPEND, 0, 0, 0,
|
||||
0, 0);
|
||||
if (rc) {
|
||||
pr_err("Ready for Suspend Vioctl failed: %ld\n", rc);
|
||||
dev_err(&vscsi->dev, "Ready for Suspend Vioctl failed: %ld\n",
|
||||
rc);
|
||||
rc = 0;
|
||||
}
|
||||
} else if (((vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE) &&
|
||||
@ -1012,7 +1017,7 @@ static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
|
||||
|
||||
if ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
|
||||
(crq->format != RESUME_FROM_SUSP)))
|
||||
pr_err("Invalid element in CRQ after Prepare for Suspend");
|
||||
dev_err(&vscsi->dev, "Invalid element in CRQ after Prepare for Suspend");
|
||||
}
|
||||
|
||||
vscsi->flags &= ~(PREP_FOR_SUSPEND_PENDING | PREP_FOR_SUSPEND_ABORTED);
|
||||
@ -1036,8 +1041,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
|
||||
{
|
||||
long rc = ADAPT_SUCCESS;
|
||||
|
||||
pr_debug("trans_event: format %d, flags 0x%x, state 0x%hx\n",
|
||||
(int)crq->format, vscsi->flags, vscsi->state);
|
||||
dev_dbg(&vscsi->dev, "trans_event: format %d, flags 0x%x, state 0x%hx\n",
|
||||
(int)crq->format, vscsi->flags, vscsi->state);
|
||||
|
||||
switch (crq->format) {
|
||||
case MIGRATED:
|
||||
@ -1073,14 +1078,14 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
|
||||
!list_empty(&vscsi->schedule_q) ||
|
||||
!list_empty(&vscsi->waiting_rsp) ||
|
||||
!list_empty(&vscsi->active_q)) {
|
||||
pr_debug("debit %d, sched %d, wait %d, active %d\n",
|
||||
vscsi->debit,
|
||||
(int)list_empty(&vscsi->schedule_q),
|
||||
(int)list_empty(&vscsi->waiting_rsp),
|
||||
(int)list_empty(&vscsi->active_q));
|
||||
pr_warn("connection lost with outstanding work\n");
|
||||
dev_dbg(&vscsi->dev, "debit %d, sched %d, wait %d, active %d\n",
|
||||
vscsi->debit,
|
||||
(int)list_empty(&vscsi->schedule_q),
|
||||
(int)list_empty(&vscsi->waiting_rsp),
|
||||
(int)list_empty(&vscsi->active_q));
|
||||
dev_warn(&vscsi->dev, "connection lost with outstanding work\n");
|
||||
} else {
|
||||
pr_debug("trans_event: SRP Processing, but no outstanding work\n");
|
||||
dev_dbg(&vscsi->dev, "trans_event: SRP Processing, but no outstanding work\n");
|
||||
}
|
||||
|
||||
ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
|
||||
@ -1097,8 +1102,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
|
||||
break;
|
||||
|
||||
case PREPARE_FOR_SUSPEND:
|
||||
pr_debug("Prep for Suspend, crq status = 0x%x\n",
|
||||
(int)crq->status);
|
||||
dev_dbg(&vscsi->dev, "Prep for Suspend, crq status = 0x%x\n",
|
||||
(int)crq->status);
|
||||
switch (vscsi->state) {
|
||||
case ERR_DISCONNECTED:
|
||||
case WAIT_CONNECTION:
|
||||
@ -1119,15 +1124,15 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
|
||||
case ERR_DISCONNECT:
|
||||
case ERR_DISCONNECT_RECONNECT:
|
||||
case WAIT_IDLE:
|
||||
pr_err("Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
|
||||
vscsi->state);
|
||||
dev_err(&vscsi->dev, "Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
|
||||
vscsi->state);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
|
||||
case RESUME_FROM_SUSP:
|
||||
pr_debug("Resume from Suspend, crq status = 0x%x\n",
|
||||
(int)crq->status);
|
||||
dev_dbg(&vscsi->dev, "Resume from Suspend, crq status = 0x%x\n",
|
||||
(int)crq->status);
|
||||
if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
|
||||
vscsi->flags |= PREP_FOR_SUSPEND_ABORTED;
|
||||
} else {
|
||||
@ -1152,8 +1157,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
|
||||
|
||||
rc = vscsi->flags & SCHEDULE_DISCONNECT;
|
||||
|
||||
pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
|
||||
vscsi->flags, vscsi->state, rc);
|
||||
dev_dbg(&vscsi->dev, "Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
|
||||
vscsi->flags, vscsi->state, rc);
|
||||
|
||||
return rc;
|
||||
}
|
||||
@ -1175,8 +1180,8 @@ static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi)
|
||||
bool ack = true;
|
||||
volatile u8 valid;
|
||||
|
||||
pr_debug("poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n",
|
||||
vscsi->flags, vscsi->state, vscsi->cmd_q.index);
|
||||
dev_dbg(&vscsi->dev, "poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n",
|
||||
vscsi->flags, vscsi->state, vscsi->cmd_q.index);
|
||||
|
||||
rc = vscsi->flags & SCHEDULE_DISCONNECT;
|
||||
crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
|
||||
@ -1204,7 +1209,7 @@ poll_work:
|
||||
* if a tranport event has occurred leave
|
||||
* everything but transport events on the queue
|
||||
*/
|
||||
pr_debug("poll_cmd_q, ignoring\n");
|
||||
dev_dbg(&vscsi->dev, "poll_cmd_q, ignoring\n");
|
||||
|
||||
/*
|
||||
* need to decrement the queue index so we can
|
||||
@ -1233,7 +1238,7 @@ poll_work:
|
||||
if (ack) {
|
||||
vio_enable_interrupts(vscsi->dma_dev);
|
||||
ack = false;
|
||||
pr_debug("poll_cmd_q, reenabling interrupts\n");
|
||||
dev_dbg(&vscsi->dev, "poll_cmd_q, reenabling interrupts\n");
|
||||
}
|
||||
valid = crq->valid;
|
||||
dma_rmb();
|
||||
@ -1241,7 +1246,7 @@ poll_work:
|
||||
goto poll_work;
|
||||
}
|
||||
|
||||
pr_debug("Leaving poll_cmd_q: rc %ld\n", rc);
|
||||
dev_dbg(&vscsi->dev, "Leaving poll_cmd_q: rc %ld\n", rc);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1258,9 +1263,9 @@ static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi)
|
||||
{
|
||||
struct ibmvscsis_cmd *cmd, *nxt;
|
||||
|
||||
pr_debug("free_cmd_qs: waiting_rsp empty %d, timer starter %d\n",
|
||||
(int)list_empty(&vscsi->waiting_rsp),
|
||||
vscsi->rsp_q_timer.started);
|
||||
dev_dbg(&vscsi->dev, "free_cmd_qs: waiting_rsp empty %d, timer starter %d\n",
|
||||
(int)list_empty(&vscsi->waiting_rsp),
|
||||
vscsi->rsp_q_timer.started);
|
||||
|
||||
list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
|
||||
list_del(&cmd->list);
|
||||
@ -1317,8 +1322,8 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
|
||||
int free_qs = false;
|
||||
long rc = 0;
|
||||
|
||||
pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags,
|
||||
vscsi->state);
|
||||
dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx\n",
|
||||
vscsi->flags, vscsi->state);
|
||||
|
||||
/* Only need to free qs if we're disconnecting from client */
|
||||
if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT)
|
||||
@ -1336,7 +1341,8 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
|
||||
break;
|
||||
case ERR_DISCONNECT_RECONNECT:
|
||||
ibmvscsis_reset_queue(vscsi);
|
||||
pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
|
||||
dev_dbg(&vscsi->dev, "adapter_idle, disc_rec: flags 0x%x\n",
|
||||
vscsi->flags);
|
||||
break;
|
||||
|
||||
case ERR_DISCONNECT:
|
||||
@ -1347,8 +1353,8 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
|
||||
vscsi->state = ERR_DISCONNECTED;
|
||||
else
|
||||
vscsi->state = WAIT_ENABLED;
|
||||
pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
|
||||
vscsi->flags, vscsi->state);
|
||||
dev_dbg(&vscsi->dev, "adapter_idle, disc: flags 0x%x, state 0x%hx\n",
|
||||
vscsi->flags, vscsi->state);
|
||||
break;
|
||||
|
||||
case WAIT_IDLE:
|
||||
@ -1370,15 +1376,15 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
|
||||
vscsi->flags &= ~DISCONNECT_SCHEDULED;
|
||||
}
|
||||
|
||||
pr_debug("adapter_idle, wait: flags 0x%x, state 0x%hx\n",
|
||||
vscsi->flags, vscsi->state);
|
||||
dev_dbg(&vscsi->dev, "adapter_idle, wait: flags 0x%x, state 0x%hx\n",
|
||||
vscsi->flags, vscsi->state);
|
||||
ibmvscsis_poll_cmd_q(vscsi);
|
||||
break;
|
||||
|
||||
case ERR_DISCONNECTED:
|
||||
vscsi->flags &= ~DISCONNECT_SCHEDULED;
|
||||
pr_debug("adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
|
||||
vscsi->flags, vscsi->state);
|
||||
dev_dbg(&vscsi->dev, "adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
|
||||
vscsi->flags, vscsi->state);
|
||||
break;
|
||||
|
||||
default:
|
||||
@ -1419,13 +1425,13 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
|
||||
vscsi->phyp_acr_state = 0;
|
||||
vscsi->phyp_acr_flags = 0;
|
||||
|
||||
pr_debug("adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
|
||||
vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
|
||||
vscsi->phyp_acr_state);
|
||||
dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
|
||||
vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
|
||||
vscsi->phyp_acr_state);
|
||||
}
|
||||
|
||||
pr_debug("Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
|
||||
vscsi->flags, vscsi->state, vscsi->new_state);
|
||||
dev_dbg(&vscsi->dev, "Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
|
||||
vscsi->flags, vscsi->state, vscsi->new_state);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1464,8 +1470,8 @@ static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
|
||||
cmd->init_time = mftb();
|
||||
iue->remote_token = crq->IU_data_ptr;
|
||||
iue->iu_len = len;
|
||||
pr_debug("copy_crq: ioba 0x%llx, init_time 0x%llx\n",
|
||||
be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
|
||||
dev_dbg(&vscsi->dev, "copy_crq: ioba 0x%llx, init_time 0x%llx\n",
|
||||
be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
|
||||
break;
|
||||
case H_PERMISSION:
|
||||
if (connection_broken(vscsi))
|
||||
@ -1536,10 +1542,10 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
|
||||
if (connection_broken(vscsi))
|
||||
flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
|
||||
}
|
||||
pr_warn("adapter_info: h_copy_rdma from client failed, rc %ld\n",
|
||||
rc);
|
||||
pr_debug("adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
|
||||
be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
|
||||
dev_warn(&vscsi->dev, "adapter_info: h_copy_rdma from client failed, rc %ld\n",
|
||||
rc);
|
||||
dev_dbg(&vscsi->dev, "adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
|
||||
be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
|
||||
ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
|
||||
flag_bits);
|
||||
goto free_dma;
|
||||
@ -1595,7 +1601,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
|
||||
|
||||
free_dma:
|
||||
dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token);
|
||||
pr_debug("Leaving adapter_info, rc %ld\n", rc);
|
||||
dev_dbg(&vscsi->dev, "Leaving adapter_info, rc %ld\n", rc);
|
||||
|
||||
return rc;
|
||||
}
|
||||
@ -1629,7 +1635,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
|
||||
*/
|
||||
min_len = offsetof(struct capabilities, migration);
|
||||
if ((olen < min_len) || (olen > PAGE_SIZE)) {
|
||||
pr_warn("cap_mad: invalid len %d\n", olen);
|
||||
dev_warn(&vscsi->dev, "cap_mad: invalid len %d\n", olen);
|
||||
mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
|
||||
return 0;
|
||||
}
|
||||
@ -1654,9 +1660,9 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
|
||||
common = (struct mad_capability_common *)&cap->migration;
|
||||
|
||||
while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) {
|
||||
pr_debug("cap_mad: len left %hd, cap type %d, cap len %hd\n",
|
||||
len, be32_to_cpu(common->cap_type),
|
||||
be16_to_cpu(common->length));
|
||||
dev_dbg(&vscsi->dev, "cap_mad: len left %hd, cap type %d, cap len %hd\n",
|
||||
len, be32_to_cpu(common->cap_type),
|
||||
be16_to_cpu(common->length));
|
||||
|
||||
cap_len = be16_to_cpu(common->length);
|
||||
if (cap_len > len) {
|
||||
@ -1673,7 +1679,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
|
||||
|
||||
switch (common->cap_type) {
|
||||
default:
|
||||
pr_debug("cap_mad: unsupported capability\n");
|
||||
dev_dbg(&vscsi->dev, "cap_mad: unsupported capability\n");
|
||||
common->server_support = 0;
|
||||
flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED);
|
||||
cap->flags &= ~flag;
|
||||
@ -1693,8 +1699,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
|
||||
be64_to_cpu(mad->buffer));
|
||||
|
||||
if (rc != H_SUCCESS) {
|
||||
pr_debug("cap_mad: failed to copy to client, rc %ld\n",
|
||||
rc);
|
||||
dev_dbg(&vscsi->dev, "cap_mad: failed to copy to client, rc %ld\n",
|
||||
rc);
|
||||
|
||||
if (rc == H_PERMISSION) {
|
||||
if (connection_broken(vscsi))
|
||||
@ -1702,8 +1708,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
|
||||
CLIENT_FAILED);
|
||||
}
|
||||
|
||||
pr_warn("cap_mad: error copying data to client, rc %ld\n",
|
||||
rc);
|
||||
dev_warn(&vscsi->dev, "cap_mad: error copying data to client, rc %ld\n",
|
||||
rc);
|
||||
ibmvscsis_post_disconnect(vscsi,
|
||||
ERR_DISCONNECT_RECONNECT,
|
||||
flag_bits);
|
||||
@ -1712,8 +1718,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
|
||||
|
||||
dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token);
|
||||
|
||||
pr_debug("Leaving cap_mad, rc %ld, client_cap 0x%x\n",
|
||||
rc, vscsi->client_cap);
|
||||
dev_dbg(&vscsi->dev, "Leaving cap_mad, rc %ld, client_cap 0x%x\n",
|
||||
rc, vscsi->client_cap);
|
||||
|
||||
return rc;
|
||||
}
|
||||
@ -1749,7 +1755,7 @@ static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue)
|
||||
vscsi->fast_fail = true;
|
||||
mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
|
||||
} else {
|
||||
pr_warn("fast fail mad sent after login\n");
|
||||
dev_warn(&vscsi->dev, "fast fail mad sent after login\n");
|
||||
mad->status = cpu_to_be16(VIOSRP_MAD_FAILED);
|
||||
}
|
||||
break;
|
||||
@ -1809,9 +1815,9 @@ static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
|
||||
*/
|
||||
if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) ||
|
||||
(vscsi->state == SRP_PROCESSING)) {
|
||||
pr_debug("snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
|
||||
vscsi->flags, (int)vscsi->rsp_q_timer.started,
|
||||
vscsi->rsp_q_timer.timer_pops);
|
||||
dev_dbg(&vscsi->dev, "snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
|
||||
vscsi->flags, (int)vscsi->rsp_q_timer.started,
|
||||
vscsi->rsp_q_timer.timer_pops);
|
||||
|
||||
/*
|
||||
* Check if the timer is running; if it
|
||||
@ -1947,8 +1953,9 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
|
||||
be64_to_cpu(msg_hi),
|
||||
be64_to_cpu(cmd->rsp.tag));
|
||||
|
||||
pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
|
||||
cmd, be64_to_cpu(cmd->rsp.tag), rc);
|
||||
dev_dbg(&vscsi->dev, "send_messages: cmd %p, tag 0x%llx, rc %ld\n",
|
||||
cmd, be64_to_cpu(cmd->rsp.tag),
|
||||
rc);
|
||||
|
||||
/* if all ok free up the command
|
||||
* element resources
|
||||
@ -2003,7 +2010,8 @@ static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
|
||||
list_add_tail(&cmd->list, &vscsi->waiting_rsp);
|
||||
ibmvscsis_send_messages(vscsi);
|
||||
} else {
|
||||
pr_debug("Error sending mad response, rc %ld\n", rc);
|
||||
dev_dbg(&vscsi->dev, "Error sending mad response, rc %ld\n",
|
||||
rc);
|
||||
if (rc == H_PERMISSION) {
|
||||
if (connection_broken(vscsi))
|
||||
flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
|
||||
@ -2039,8 +2047,8 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
|
||||
* expecting a response.
|
||||
*/
|
||||
case WAIT_CONNECTION:
|
||||
pr_debug("mad: in Wait Connection state, ignoring MAD, flags %d\n",
|
||||
vscsi->flags);
|
||||
dev_dbg(&vscsi->dev, "mad: in Wait Connection state, ignoring MAD, flags %d\n",
|
||||
vscsi->flags);
|
||||
return ADAPT_SUCCESS;
|
||||
|
||||
case SRP_PROCESSING:
|
||||
@ -2075,12 +2083,12 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
|
||||
if (!rc) {
|
||||
mad = (struct mad_common *)&vio_iu(iue)->mad;
|
||||
|
||||
pr_debug("mad: type %d\n", be32_to_cpu(mad->type));
|
||||
dev_dbg(&vscsi->dev, "mad: type %d\n", be32_to_cpu(mad->type));
|
||||
|
||||
rc = ibmvscsis_process_mad(vscsi, iue);
|
||||
|
||||
pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
|
||||
rc);
|
||||
dev_dbg(&vscsi->dev, "mad: status %hd, rc %ld\n",
|
||||
be16_to_cpu(mad->status), rc);
|
||||
|
||||
if (!rc)
|
||||
ibmvscsis_send_mad_resp(vscsi, cmd, crq);
|
||||
@ -2088,7 +2096,7 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
|
||||
ibmvscsis_free_cmd_resources(vscsi, cmd);
|
||||
}
|
||||
|
||||
pr_debug("Leaving mad, rc %ld\n", rc);
|
||||
dev_dbg(&vscsi->dev, "Leaving mad, rc %ld\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -2211,16 +2219,17 @@ static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
|
||||
{
|
||||
char *name = tport->tport_name;
|
||||
struct ibmvscsis_nexus *nexus;
|
||||
struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
|
||||
int rc;
|
||||
|
||||
if (tport->ibmv_nexus) {
|
||||
pr_debug("tport->ibmv_nexus already exists\n");
|
||||
dev_dbg(&vscsi->dev, "tport->ibmv_nexus already exists\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
|
||||
if (!nexus) {
|
||||
pr_err("Unable to allocate struct ibmvscsis_nexus\n");
|
||||
dev_err(&vscsi->dev, "Unable to allocate struct ibmvscsis_nexus\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
@ -2316,7 +2325,7 @@ static long ibmvscsis_srp_login(struct scsi_info *vscsi,
|
||||
cmd->rsp.format = VIOSRP_SRP_FORMAT;
|
||||
cmd->rsp.tag = req->tag;
|
||||
|
||||
pr_debug("srp_login: reason 0x%x\n", reason);
|
||||
dev_dbg(&vscsi->dev, "srp_login: reason 0x%x\n", reason);
|
||||
|
||||
if (reason)
|
||||
rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason);
|
||||
@ -2333,7 +2342,7 @@ static long ibmvscsis_srp_login(struct scsi_info *vscsi,
|
||||
ibmvscsis_free_cmd_resources(vscsi, cmd);
|
||||
}
|
||||
|
||||
pr_debug("Leaving srp_login, rc %ld\n", rc);
|
||||
dev_dbg(&vscsi->dev, "Leaving srp_login, rc %ld\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -2415,8 +2424,8 @@ static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
|
||||
|
||||
case SRP_TSK_MGMT:
|
||||
tsk = &vio_iu(iue)->srp.tsk_mgmt;
|
||||
pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag,
|
||||
tsk->tag);
|
||||
dev_dbg(&vscsi->dev, "tsk_mgmt tag: %llu (0x%llx)\n",
|
||||
tsk->tag, tsk->tag);
|
||||
cmd->rsp.tag = tsk->tag;
|
||||
vscsi->debit += 1;
|
||||
cmd->type = TASK_MANAGEMENT;
|
||||
@ -2425,8 +2434,8 @@ static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
|
||||
break;
|
||||
|
||||
case SRP_CMD:
|
||||
pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag,
|
||||
srp->tag);
|
||||
dev_dbg(&vscsi->dev, "srp_cmd tag: %llu (0x%llx)\n",
|
||||
srp->tag, srp->tag);
|
||||
cmd->rsp.tag = srp->tag;
|
||||
vscsi->debit += 1;
|
||||
cmd->type = SCSI_CDB;
|
||||
@ -2603,7 +2612,7 @@ static int read_dma_window(struct scsi_info *vscsi)
|
||||
"ibm,my-dma-window",
|
||||
NULL);
|
||||
if (!dma_window) {
|
||||
pr_err("Couldn't find ibm,my-dma-window property\n");
|
||||
dev_err(&vscsi->dev, "Couldn't find ibm,my-dma-window property\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -2613,7 +2622,7 @@ static int read_dma_window(struct scsi_info *vscsi)
|
||||
prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
|
||||
NULL);
|
||||
if (!prop) {
|
||||
pr_warn("Couldn't find ibm,#dma-address-cells property\n");
|
||||
dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-address-cells property\n");
|
||||
dma_window++;
|
||||
} else {
|
||||
dma_window += be32_to_cpu(*prop);
|
||||
@ -2622,7 +2631,7 @@ static int read_dma_window(struct scsi_info *vscsi)
|
||||
prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
|
||||
NULL);
|
||||
if (!prop) {
|
||||
pr_warn("Couldn't find ibm,#dma-size-cells property\n");
|
||||
dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-size-cells property\n");
|
||||
dma_window++;
|
||||
} else {
|
||||
dma_window += be32_to_cpu(*prop);
|
||||
@ -2808,8 +2817,8 @@ static void ibmvscsis_parse_task(struct scsi_info *vscsi,
|
||||
|
||||
srp_tsk->lun.scsi_lun[0] &= 0x3f;
|
||||
|
||||
pr_debug("calling submit_tmr, func %d\n",
|
||||
srp_tsk->tsk_mgmt_func);
|
||||
dev_dbg(&vscsi->dev, "calling submit_tmr, func %d\n",
|
||||
srp_tsk->tsk_mgmt_func);
|
||||
rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL,
|
||||
scsilun_to_int(&srp_tsk->lun), srp_tsk,
|
||||
tcm_type, GFP_KERNEL, tag_to_abort, 0);
|
||||
@ -3113,8 +3122,8 @@ static long srp_build_response(struct scsi_info *vscsi,
|
||||
if (cmd->type == SCSI_CDB) {
|
||||
rsp->status = ibmvscsis_fast_fail(vscsi, cmd);
|
||||
if (rsp->status) {
|
||||
pr_debug("build_resp: cmd %p, scsi status %d\n", cmd,
|
||||
(int)rsp->status);
|
||||
dev_dbg(&vscsi->dev, "build_resp: cmd %p, scsi status %d\n",
|
||||
cmd, (int)rsp->status);
|
||||
ibmvscsis_determine_resid(se_cmd, rsp);
|
||||
if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) {
|
||||
rsp->sense_data_len =
|
||||
@ -3127,7 +3136,8 @@ static long srp_build_response(struct scsi_info *vscsi,
|
||||
rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
|
||||
UCSOLNT_RESP_SHIFT;
|
||||
} else if (cmd->flags & CMD_FAST_FAIL) {
|
||||
pr_debug("build_resp: cmd %p, fast fail\n", cmd);
|
||||
dev_dbg(&vscsi->dev, "build_resp: cmd %p, fast fail\n",
|
||||
cmd);
|
||||
rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
|
||||
UCSOLNT_RESP_SHIFT;
|
||||
} else {
|
||||
@ -3340,7 +3350,7 @@ static void ibmvscsis_handle_crq(unsigned long data)
|
||||
|
||||
spin_lock_bh(&vscsi->intr_lock);
|
||||
|
||||
pr_debug("got interrupt\n");
|
||||
dev_dbg(&vscsi->dev, "got interrupt\n");
|
||||
|
||||
/*
|
||||
* if we are in a path where we are waiting for all pending commands
|
||||
@ -3350,8 +3360,8 @@ static void ibmvscsis_handle_crq(unsigned long data)
|
||||
if (TARGET_STOP(vscsi)) {
|
||||
vio_enable_interrupts(vscsi->dma_dev);
|
||||
|
||||
pr_debug("handle_crq, don't process: flags 0x%x, state 0x%hx\n",
|
||||
vscsi->flags, vscsi->state);
|
||||
dev_dbg(&vscsi->dev, "handle_crq, don't process: flags 0x%x, state 0x%hx\n",
|
||||
vscsi->flags, vscsi->state);
|
||||
spin_unlock_bh(&vscsi->intr_lock);
|
||||
return;
|
||||
}
|
||||
@ -3414,20 +3424,20 @@ cmd_work:
|
||||
if (ack) {
|
||||
vio_enable_interrupts(vscsi->dma_dev);
|
||||
ack = false;
|
||||
pr_debug("handle_crq, reenabling interrupts\n");
|
||||
dev_dbg(&vscsi->dev, "handle_crq, reenabling interrupts\n");
|
||||
}
|
||||
valid = crq->valid;
|
||||
dma_rmb();
|
||||
if (valid)
|
||||
goto cmd_work;
|
||||
} else {
|
||||
pr_debug("handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
|
||||
vscsi->flags, vscsi->state, vscsi->cmd_q.index);
|
||||
dev_dbg(&vscsi->dev, "handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
|
||||
vscsi->flags, vscsi->state, vscsi->cmd_q.index);
|
||||
}
|
||||
|
||||
pr_debug("Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
|
||||
(int)list_empty(&vscsi->schedule_q), vscsi->flags,
|
||||
vscsi->state);
|
||||
dev_dbg(&vscsi->dev, "Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
|
||||
(int)list_empty(&vscsi->schedule_q), vscsi->flags,
|
||||
vscsi->state);
|
||||
|
||||
spin_unlock_bh(&vscsi->intr_lock);
|
||||
}
|
||||
@ -3443,7 +3453,7 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
|
||||
vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
|
||||
if (!vscsi) {
|
||||
rc = -ENOMEM;
|
||||
pr_err("probe: allocation of adapter failed\n");
|
||||
dev_err(&vdev->dev, "probe: allocation of adapter failed\n");
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -3456,14 +3466,14 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
|
||||
snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
|
||||
dev_name(&vdev->dev));
|
||||
|
||||
pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
|
||||
dev_dbg(&vscsi->dev, "probe tport_name: %s\n", vscsi->tport.tport_name);
|
||||
|
||||
rc = read_dma_window(vscsi);
|
||||
if (rc)
|
||||
goto free_adapter;
|
||||
pr_debug("Probe: liobn 0x%x, riobn 0x%x\n",
|
||||
vscsi->dds.window[LOCAL].liobn,
|
||||
vscsi->dds.window[REMOTE].liobn);
|
||||
dev_dbg(&vscsi->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
|
||||
vscsi->dds.window[LOCAL].liobn,
|
||||
vscsi->dds.window[REMOTE].liobn);
|
||||
|
||||
strcpy(vscsi->eye, "VSCSI ");
|
||||
strncat(vscsi->eye, vdev->name, MAX_EYE);
|
||||
@ -3541,8 +3551,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
|
||||
* client can connect" and the client isn't activated yet.
|
||||
* We'll make the call again when he sends an init msg.
|
||||
*/
|
||||
pr_debug("probe hrc %ld, client partition num %d\n",
|
||||
hrc, vscsi->client_data.partition_number);
|
||||
dev_dbg(&vscsi->dev, "probe hrc %ld, client partition num %d\n",
|
||||
hrc, vscsi->client_data.partition_number);
|
||||
|
||||
tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq,
|
||||
(unsigned long)vscsi);
|
||||
@ -3602,7 +3612,7 @@ static int ibmvscsis_remove(struct vio_dev *vdev)
|
||||
{
|
||||
struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);
|
||||
|
||||
pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
|
||||
dev_dbg(&vscsi->dev, "remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
|
||||
|
||||
spin_lock_bh(&vscsi->intr_lock);
|
||||
ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
|
||||
@ -3766,14 +3776,16 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
|
||||
* attempt an srp_transfer_data.
|
||||
*/
|
||||
if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
|
||||
pr_err("write_pending failed since: %d\n", vscsi->flags);
|
||||
dev_err(&vscsi->dev, "write_pending failed since: %d\n",
|
||||
vscsi->flags);
|
||||
return -EIO;
|
||||
|
||||
}
|
||||
|
||||
rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
|
||||
1, 1);
|
||||
if (rc) {
|
||||
pr_err("srp_transfer_data() failed: %d\n", rc);
|
||||
dev_err(&vscsi->dev, "srp_transfer_data() failed: %d\n", rc);
|
||||
return -EIO;
|
||||
}
|
||||
/*
|
||||
@ -3811,7 +3823,7 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
|
||||
rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
|
||||
1);
|
||||
if (rc) {
|
||||
pr_err("srp_transfer_data failed: %d\n", rc);
|
||||
dev_err(&vscsi->dev, "srp_transfer_data failed: %d\n", rc);
|
||||
sd = se_cmd->sense_buffer;
|
||||
se_cmd->scsi_sense_length = 18;
|
||||
memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
@@ -3834,7 +3846,7 @@ static int ibmvscsis_queue_status(struct se_cmd *se_cmd)
struct scsi_info *vscsi = cmd->adapter;
uint len;

pr_debug("queue_status %p\n", se_cmd);
dev_dbg(&vscsi->dev, "queue_status %p\n", se_cmd);

srp_build_response(vscsi, cmd, &len);
cmd->rsp.format = SRP_FORMAT;
@@ -3854,8 +3866,8 @@ static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
uint len;

pr_debug("queue_tm_rsp %p, status %d\n",
se_cmd, (int)se_cmd->se_tmr_req->response);
dev_dbg(&vscsi->dev, "queue_tm_rsp %p, status %d\n",
se_cmd, (int)se_cmd->se_tmr_req->response);

if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK &&
cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) {
@@ -3877,8 +3889,12 @@ static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)

static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
{
pr_debug("ibmvscsis_aborted_task %p task_tag: %llu\n",
se_cmd, se_cmd->tag);
struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
se_cmd);
struct scsi_info *vscsi = cmd->adapter;

dev_dbg(&vscsi->dev, "ibmvscsis_aborted_task %p task_tag: %llu\n",
se_cmd, se_cmd->tag);
}

static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
@@ -3886,12 +3902,14 @@ static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
const char *name)
{
struct ibmvscsis_tport *tport;
struct scsi_info *vscsi;

tport = ibmvscsis_lookup_port(name);
if (tport) {
vscsi = container_of(tport, struct scsi_info, tport);
tport->tport_proto_id = SCSI_PROTOCOL_SRP;
pr_debug("make_tport(%s), pointer:%p, tport_id:%x\n",
name, tport, tport->tport_proto_id);
dev_dbg(&vscsi->dev, "make_tport(%s), pointer:%p, tport_id:%x\n",
name, tport, tport->tport_proto_id);
return &tport->tport_wwn;
}

@@ -3903,9 +3921,10 @@ static void ibmvscsis_drop_tport(struct se_wwn *wwn)
struct ibmvscsis_tport *tport = container_of(wwn,
struct ibmvscsis_tport,
tport_wwn);
struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);

pr_debug("drop_tport(%s)\n",
config_item_name(&tport->tport_wwn.wwn_group.cg_item));
dev_dbg(&vscsi->dev, "drop_tport(%s)\n",
config_item_name(&tport->tport_wwn.wwn_group.cg_item));
}

static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
@@ -3990,12 +4009,12 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,

rc = kstrtoul(page, 0, &tmp);
if (rc < 0) {
pr_err("Unable to extract srpt_tpg_store_enable\n");
dev_err(&vscsi->dev, "Unable to extract srpt_tpg_store_enable\n");
return -EINVAL;
}

if ((tmp != 0) && (tmp != 1)) {
pr_err("Illegal value for srpt_tpg_store_enable\n");
dev_err(&vscsi->dev, "Illegal value for srpt_tpg_store_enable\n");
return -EINVAL;
}

@@ -4004,8 +4023,8 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
tport->enabled = true;
lrc = ibmvscsis_enable_change_state(vscsi);
if (lrc)
pr_err("enable_change_state failed, rc %ld state %d\n",
lrc, vscsi->state);
dev_err(&vscsi->dev, "enable_change_state failed, rc %ld state %d\n",
lrc, vscsi->state);
spin_unlock_bh(&vscsi->intr_lock);
} else {
spin_lock_bh(&vscsi->intr_lock);
@@ -4015,7 +4034,8 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
spin_unlock_bh(&vscsi->intr_lock);
}

pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state);
dev_dbg(&vscsi->dev, "tpg_enable_store, tmp %ld, state %d\n", tmp,
vscsi->state);

return count;
}
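The ibmvscsis hunks above are a mechanical conversion from pr_debug()/pr_err() to dev_dbg()/dev_err(), pulling the adapter out of the se_cmd so every message is prefixed with the owning device. A minimal sketch of the pattern, with invented names (my_adapter, my_complete) rather than the driver's own:

#include <linux/device.h>
#include <linux/printk.h>

struct my_adapter {
	struct device dev;	/* embedded device, as in most drivers */
};

static void my_complete(struct my_adapter *adap, int status)
{
	/* before: message carries no device context */
	pr_debug("completion status %d\n", status);

	/* after: dev_dbg() tags the message with driver and device name */
	dev_dbg(&adap->dev, "completion status %d\n", status);
}
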
@@ -9653,8 +9653,8 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
if (i == 0) {
entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
ioa_cfg->hrrq[i].min_cmd_id = 0;
ioa_cfg->hrrq[i].max_cmd_id =
(entries_each_hrrq - 1);
ioa_cfg->hrrq[i].max_cmd_id =
(entries_each_hrrq - 1);
} else {
entries_each_hrrq =
IPR_NUM_BASE_CMD_BLKS/
@@ -307,6 +307,7 @@ static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,

/**
* iscsi_sw_tcp_xmit - TCP transmit
* @conn: iscsi connection
**/
static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
{
@@ -357,6 +358,7 @@ error:

/**
* iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
* @conn: iscsi connection
*/
static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
{
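Both hunks here only add the missing @conn: line to a kernel-doc header; scripts/kernel-doc warns when a parameter goes undocumented. The expected shape, with a made-up function and struct for illustration:

struct example_conn { int id; };

/**
 * example_xmit - transmit queued PDUs for a connection
 * @conn: connection being serviced
 *
 * Return: number of bytes transmitted, or a negative errno.
 */
static int example_xmit(struct example_conn *conn)
{
	return 0;
}
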
@@ -1696,6 +1696,15 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
*/
switch (session->state) {
case ISCSI_STATE_FAILED:
/*
* cmds should fail during shutdown, if the session
* state is bad, allowing completion to happen
*/
if (unlikely(system_state != SYSTEM_RUNNING)) {
reason = FAILURE_SESSION_FAILED;
sc->result = DID_NO_CONNECT << 16;
break;
}
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_IMM_RETRY << 16;
@@ -1978,6 +1987,19 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
}

if (session->state != ISCSI_STATE_LOGGED_IN) {
/*
* During shutdown, if session is prematurely disconnected,
* recovery won't happen and there will be hung cmds. Not
* handling cmds would trigger EH, also bad in this case.
* Instead, handle cmd, allow completion to happen and let
* upper layer to deal with the result.
*/
if (unlikely(system_state != SYSTEM_RUNNING)) {
sc->result = DID_NO_CONNECT << 16;
ISCSI_DBG_EH(session, "sc on shutdown, handled\n");
rc = BLK_EH_HANDLED;
goto done;
}
/*
* We are probably in the middle of iscsi recovery so let
* that complete and handle the error.
@@ -2082,7 +2104,7 @@ done:
task->last_timeout = jiffies;
spin_unlock(&session->frwd_lock);
ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
"timer reset" : "nh");
"timer reset" : "shutdown or nh");
return rc;
}
EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out);
@@ -2722,8 +2744,10 @@ static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost)
* @iscsit: iscsi transport template
* @shost: scsi host
* @cmds_max: session can queue
* @dd_size: private driver data size, added to session allocation size
* @cmd_task_size: LLD task private data size
* @initial_cmdsn: initial CmdSN
* @id: target ID to add to this session
*
* This can be used by software iscsi_transports that allocate
* a session per scsi host.
@@ -2951,7 +2975,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_setup);

/**
* iscsi_conn_teardown - teardown iscsi connection
* cls_conn: iscsi class connection
* @cls_conn: iscsi class connection
*
* TODO: we may need to make this into a two step process
* like scsi-mls remove + put host
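The libiscsi changes above key off system_state: once the system is no longer SYSTEM_RUNNING, commands are completed with DID_NO_CONNECT instead of being parked behind a recovery that will never run. A stripped-down sketch of the test (the handler body is illustrative, not libiscsi's):

#include <linux/kernel.h>	/* system_state, SYSTEM_RUNNING */
#include <linux/errno.h>

static int example_queue_cmd(void)
{
	if (unlikely(system_state != SYSTEM_RUNNING)) {
		/* halting or rebooting: fail fast so completion happens */
		return -ENODEV;
	}
	return 0;	/* normal dispatch path */
}
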
@@ -798,6 +798,8 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)

/**
* iscsi_tcp_hdr_recv_done - process PDU header
* @tcp_conn: iSCSI TCP connection
* @segment: the buffer segment being processed
*
* This is the callback invoked when the PDU header has
* been received. If the header is followed by additional
@@ -876,9 +878,10 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_recv_segment_is_hdr);
* @conn: iscsi connection
* @skb: network buffer with header and/or data segment
* @offset: offset in skb
* @offload: bool indicating if transfer was offloaded
* @offloaded: bool indicating if transfer was offloaded
* @status: iscsi TCP status result
*
* Will return status of transfer in status. And will return
* Will return status of transfer in @status. And will return
* number of bytes copied.
*/
int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
@@ -955,9 +958,7 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_recv_skb);

/**
* iscsi_tcp_task_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
* @conn: iscsi connection
* @task: scsi command task
* @sc: scsi command
*/
int iscsi_tcp_task_init(struct iscsi_task *task)
{
@@ -730,7 +730,6 @@ int sas_discover_sata(struct domain_device *dev)
if (res)
return res;

sas_discover_event(dev->port, DISCE_PROBE);
return 0;
}

@@ -212,13 +212,9 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev)
}
}

static void sas_probe_devices(struct work_struct *work)
static void sas_probe_devices(struct asd_sas_port *port)
{
struct domain_device *dev, *n;
struct sas_discovery_event *ev = to_sas_discovery_event(work);
struct asd_sas_port *port = ev->port;

clear_bit(DISCE_PROBE, &port->disc.pending);

/* devices must be domain members before link recovery and probe */
list_for_each_entry(dev, &port->disco_list, disco_list_node) {
@@ -294,7 +290,6 @@ int sas_discover_end_dev(struct domain_device *dev)
res = sas_notify_lldd_dev_found(dev);
if (res)
return res;
sas_discover_event(dev->port, DISCE_PROBE);

return 0;
}
@@ -353,13 +348,9 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
sas_put_device(dev);
}

static void sas_destruct_devices(struct work_struct *work)
void sas_destruct_devices(struct asd_sas_port *port)
{
struct domain_device *dev, *n;
struct sas_discovery_event *ev = to_sas_discovery_event(work);
struct asd_sas_port *port = ev->port;

clear_bit(DISCE_DESTRUCT, &port->disc.pending);

list_for_each_entry_safe(dev, n, &port->destroy_list, disco_list_node) {
list_del_init(&dev->disco_list_node);
@@ -370,6 +361,16 @@ static void sas_destruct_devices(struct work_struct *work)
}
}

static void sas_destruct_ports(struct asd_sas_port *port)
{
struct sas_port *sas_port, *p;

list_for_each_entry_safe(sas_port, p, &port->sas_port_del_list, del_list) {
list_del_init(&sas_port->del_list);
sas_port_delete(sas_port);
}
}

void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
{
if (!test_bit(SAS_DEV_DESTROY, &dev->state) &&
@@ -384,7 +385,6 @@ void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
if (!test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
sas_rphy_unlink(dev->rphy);
list_move_tail(&dev->disco_list_node, &port->destroy_list);
sas_discover_event(dev->port, DISCE_DESTRUCT);
}
}

@@ -490,6 +490,8 @@ static void sas_discover_domain(struct work_struct *work)
port->port_dev = NULL;
}

sas_probe_devices(port);

SAS_DPRINTK("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id,
task_pid_nr(current), error);
}
@@ -523,6 +525,10 @@ static void sas_revalidate_domain(struct work_struct *work)
port->id, task_pid_nr(current), res);
out:
mutex_unlock(&ha->disco_mutex);

sas_destruct_devices(port);
sas_destruct_ports(port);
sas_probe_devices(port);
}

/* ---------- Events ---------- */
@@ -534,7 +540,7 @@ static void sas_chain_work(struct sas_ha_struct *ha, struct sas_work *sw)
* workqueue, or known to be submitted from a context that is
* not racing against draining
*/
scsi_queue_work(ha->core.shost, &sw->work);
queue_work(ha->disco_q, &sw->work);
}

static void sas_chain_event(int event, unsigned long *pending,
@@ -578,10 +584,8 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
[DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
[DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
[DISCE_PROBE] = sas_probe_devices,
[DISCE_SUSPEND] = sas_suspend_devices,
[DISCE_RESUME] = sas_resume_devices,
[DISCE_DESTRUCT] = sas_destruct_devices,
};

disc->pending = 0;
@@ -29,7 +29,8 @@

int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
int rc = 0;
/* it's added to the defer_q when draining so return succeed */
int rc = 1;

if (!test_bit(SAS_HA_REGISTERED, &ha->state))
return 0;
@@ -39,24 +40,20 @@ int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
if (list_empty(&sw->drain_node))
list_add_tail(&sw->drain_node, &ha->defer_q);
} else
rc = scsi_queue_work(ha->core.shost, &sw->work);
rc = queue_work(ha->event_q, &sw->work);

return rc;
}

static int sas_queue_event(int event, unsigned long *pending,
struct sas_work *work,
static int sas_queue_event(int event, struct sas_work *work,
struct sas_ha_struct *ha)
{
int rc = 0;
unsigned long flags;
int rc;

if (!test_and_set_bit(event, pending)) {
unsigned long flags;

spin_lock_irqsave(&ha->lock, flags);
rc = sas_queue_work(ha, work);
spin_unlock_irqrestore(&ha->lock, flags);
}
spin_lock_irqsave(&ha->lock, flags);
rc = sas_queue_work(ha, work);
spin_unlock_irqrestore(&ha->lock, flags);

return rc;
}
@@ -64,21 +61,25 @@ static int sas_queue_event(int event, unsigned long *pending,

void __sas_drain_work(struct sas_ha_struct *ha)
{
struct workqueue_struct *wq = ha->core.shost->work_q;
struct sas_work *sw, *_sw;
int ret;

set_bit(SAS_HA_DRAINING, &ha->state);
/* flush submitters */
spin_lock_irq(&ha->lock);
spin_unlock_irq(&ha->lock);

drain_workqueue(wq);
drain_workqueue(ha->event_q);
drain_workqueue(ha->disco_q);

spin_lock_irq(&ha->lock);
clear_bit(SAS_HA_DRAINING, &ha->state);
list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
list_del_init(&sw->drain_node);
sas_queue_work(ha, sw);
ret = sas_queue_work(ha, sw);
if (ret != 1)
sas_free_event(to_asd_sas_event(&sw->work));

}
spin_unlock_irq(&ha->lock);
}
@@ -115,33 +116,78 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
struct asd_sas_port *port = ha->sas_port[i];
const int ev = DISCE_REVALIDATE_DOMAIN;
struct sas_discovery *d = &port->disc;
struct asd_sas_phy *sas_phy;

if (!test_and_clear_bit(ev, &d->pending))
continue;

sas_queue_event(ev, &d->pending, &d->disc_work[ev].work, ha);
if (list_empty(&port->phy_list))
continue;

sas_phy = container_of(port->phy_list.next, struct asd_sas_phy,
port_phy_el);
ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
}
mutex_unlock(&ha->disco_mutex);
}


static void sas_port_event_worker(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);

sas_port_event_fns[ev->event](work);
sas_free_event(ev);
}

static void sas_phy_event_worker(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);

sas_phy_event_fns[ev->event](work);
sas_free_event(ev);
}

static int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event)
{
struct asd_sas_event *ev;
struct sas_ha_struct *ha = phy->ha;
int ret;

BUG_ON(event >= PORT_NUM_EVENTS);

return sas_queue_event(event, &phy->port_events_pending,
&phy->port_events[event].work, ha);
ev = sas_alloc_event(phy);
if (!ev)
return -ENOMEM;

INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);

ret = sas_queue_event(event, &ev->work, ha);
if (ret != 1)
sas_free_event(ev);

return ret;
}

int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
{
struct asd_sas_event *ev;
struct sas_ha_struct *ha = phy->ha;
int ret;

BUG_ON(event >= PHY_NUM_EVENTS);

return sas_queue_event(event, &phy->phy_events_pending,
&phy->phy_events[event].work, ha);
ev = sas_alloc_event(phy);
if (!ev)
return -ENOMEM;

INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);

ret = sas_queue_event(event, &ev->work, ha);
if (ret != 1)
sas_free_event(ev);

return ret;
}

int sas_init_events(struct sas_ha_struct *sas_ha)
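The core of the libsas rework is visible here: notifications switch from one statically embedded work item per event type, where a burst of identical events could silently merge, to a heap-allocated event per notification, queued to a dedicated workqueue and freed by the caller whenever the queue did not take it. A reduced sketch of that allocate-queue-or-free pattern, names invented:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_event {
	struct work_struct work;
	int id;
};

static int example_notify(struct workqueue_struct *wq,
			  work_func_t fn, int id)
{
	struct example_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

	if (!ev)
		return -ENOMEM;

	INIT_WORK(&ev->work, fn);
	ev->id = id;

	if (!queue_work(wq, &ev->work)) {	/* not taken: drop it */
		kfree(ev);
		return 0;
	}
	return 1;	/* queued; the worker frees the event when done */
}
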
@@ -293,6 +293,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
phy->phy->minimum_linkrate = dr->pmin_linkrate;
phy->phy->maximum_linkrate = dr->pmax_linkrate;
phy->phy->negotiated_linkrate = phy->linkrate;
phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED);

skip:
if (new_phy)
@@ -686,7 +687,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
resp, RPEL_RESP_SIZE);

if (!res)
if (res)
goto out;

phy->invalid_dword_count = scsi_to_u32(&resp[12]);
@@ -695,6 +696,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);

out:
kfree(req);
kfree(resp);
return res;

@@ -1914,7 +1916,8 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
sas_port_delete_phy(phy->port, phy->phy);
sas_device_set_phy(found, phy->port);
if (phy->port->num_phys == 0)
sas_port_delete(phy->port);
list_add_tail(&phy->port->del_list,
&parent->port->sas_port_del_list);
phy->port = NULL;
}
}
@@ -2122,7 +2125,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
struct domain_device *dev = NULL;

res = sas_find_bcast_dev(port_dev, &dev);
while (res == 0 && dev) {
if (res == 0 && dev) {
struct expander_device *ex = &dev->ex_dev;
int i = 0, phy_id;

@@ -2134,9 +2137,6 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
res = sas_rediscover(dev, phy_id);
i = phy_id + 1;
} while (i < ex->num_phys);

dev = NULL;
res = sas_find_bcast_dev(port_dev, &dev);
}
return res;
}
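Note the one-character logic fix in sas_smp_get_phy_events() above: the old `if (!res)` jumped to the cleanup label on *success*, so the phy event counters were only copied when smp_execute_task() had failed. The corrected flow, reduced to its shape (helpers invented):

static int example_execute_task(void)
{
	return 0;	/* 0 means success, like smp_execute_task() */
}

static int example_get_events(unsigned int *count)
{
	int res = example_execute_task();

	if (res)	/* was: if (!res) -- an inverted error check */
		goto out;

	*count = 42;	/* copy counters out of the response */
out:
	return res;
}
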
@@ -39,6 +39,7 @@
#include "../scsi_sas_internal.h"

static struct kmem_cache *sas_task_cache;
static struct kmem_cache *sas_event_cache;

struct sas_task *sas_alloc_task(gfp_t flags)
{
@@ -109,6 +110,7 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr)

int sas_register_ha(struct sas_ha_struct *sas_ha)
{
char name[64];
int error = 0;

mutex_init(&sas_ha->disco_mutex);
@@ -122,6 +124,8 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
INIT_LIST_HEAD(&sas_ha->defer_q);
INIT_LIST_HEAD(&sas_ha->eh_dev_q);

sas_ha->event_thres = SAS_PHY_SHUTDOWN_THRES;

error = sas_register_phys(sas_ha);
if (error) {
printk(KERN_NOTICE "couldn't register sas phys:%d\n", error);
@@ -140,10 +144,24 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
goto Undo_ports;
}

error = -ENOMEM;
snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev));
sas_ha->event_q = create_singlethread_workqueue(name);
if (!sas_ha->event_q)
goto Undo_ports;

snprintf(name, sizeof(name), "%s_disco_q", dev_name(sas_ha->dev));
sas_ha->disco_q = create_singlethread_workqueue(name);
if (!sas_ha->disco_q)
goto Undo_event_q;

INIT_LIST_HEAD(&sas_ha->eh_done_q);
INIT_LIST_HEAD(&sas_ha->eh_ata_q);

return 0;

Undo_event_q:
destroy_workqueue(sas_ha->event_q);
Undo_ports:
sas_unregister_ports(sas_ha);
Undo_phys:
@@ -174,6 +192,9 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
__sas_drain_work(sas_ha);
mutex_unlock(&sas_ha->drain_mutex);

destroy_workqueue(sas_ha->disco_q);
destroy_workqueue(sas_ha->event_q);

return 0;
}

@@ -364,8 +385,6 @@ void sas_prep_resume_ha(struct sas_ha_struct *ha)
struct asd_sas_phy *phy = ha->sas_phy[i];

memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
phy->port_events_pending = 0;
phy->phy_events_pending = 0;
phy->frame_rcvd_size = 0;
}
}
@@ -537,6 +556,37 @@ static struct sas_function_template sft = {
.smp_handler = sas_smp_handler,
};

static inline ssize_t phy_event_threshold_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

return scnprintf(buf, PAGE_SIZE, "%u\n", sha->event_thres);
}

static inline ssize_t phy_event_threshold_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

sha->event_thres = simple_strtol(buf, NULL, 10);

/* threshold cannot be set too small */
if (sha->event_thres < 32)
sha->event_thres = 32;

return count;
}

DEVICE_ATTR(phy_event_threshold,
S_IRUGO|S_IWUSR,
phy_event_threshold_show,
phy_event_threshold_store);
EXPORT_SYMBOL_GPL(dev_attr_phy_event_threshold);

struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *dft)
{
@@ -555,20 +605,71 @@ sas_domain_attach_transport(struct sas_domain_function_template *dft)
}
EXPORT_SYMBOL_GPL(sas_domain_attach_transport);


struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
{
struct asd_sas_event *event;
gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);

event = kmem_cache_zalloc(sas_event_cache, flags);
if (!event)
return NULL;

atomic_inc(&phy->event_nr);

if (atomic_read(&phy->event_nr) > phy->ha->event_thres) {
if (i->dft->lldd_control_phy) {
if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) {
sas_printk("The phy%02d bursting events, shut it down.\n",
phy->id);
sas_notify_phy_event(phy, PHYE_SHUTDOWN);
}
} else {
/* Do not support PHY control, stop allocating events */
WARN_ONCE(1, "PHY control not supported.\n");
kmem_cache_free(sas_event_cache, event);
atomic_dec(&phy->event_nr);
event = NULL;
}
}

return event;
}

void sas_free_event(struct asd_sas_event *event)
{
struct asd_sas_phy *phy = event->phy;

kmem_cache_free(sas_event_cache, event);
atomic_dec(&phy->event_nr);
}

/* ---------- SAS Class register/unregister ---------- */

static int __init sas_class_init(void)
{
sas_task_cache = KMEM_CACHE(sas_task, SLAB_HWCACHE_ALIGN);
if (!sas_task_cache)
return -ENOMEM;
goto out;

sas_event_cache = KMEM_CACHE(asd_sas_event, SLAB_HWCACHE_ALIGN);
if (!sas_event_cache)
goto free_task_kmem;

return 0;
free_task_kmem:
kmem_cache_destroy(sas_task_cache);
out:
return -ENOMEM;
}

static void __exit sas_class_exit(void)
{
kmem_cache_destroy(sas_task_cache);
kmem_cache_destroy(sas_event_cache);
}

MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
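sas_alloc_event() above also adds defensive throttling: a per-phy atomic counts in-flight events, and a phy that exceeds the host's event_thres is shut down exactly once, with cmpxchg() electing the single caller allowed to raise PHYE_SHUTDOWN. A sketch of that gate, names invented:

#include <linux/atomic.h>
#include <linux/types.h>

struct example_phy {
	atomic_t event_nr;
	int in_shutdown;
};

static bool example_should_shutdown(struct example_phy *phy, int thres)
{
	atomic_inc(&phy->event_nr);	/* one per allocated event */

	if (atomic_read(&phy->event_nr) <= thres)
		return false;

	/* only the first overflowing caller wins the 0 -> 1 exchange */
	return cmpxchg(&phy->in_shutdown, 0, 1) == 0;
}
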
@@ -61,6 +61,9 @@ int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf);
int sas_register_phys(struct sas_ha_struct *sas_ha);
void sas_unregister_phys(struct sas_ha_struct *sas_ha);

struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy);
void sas_free_event(struct asd_sas_event *event);

int sas_register_ports(struct sas_ha_struct *sas_ha);
void sas_unregister_ports(struct sas_ha_struct *sas_ha);

@@ -98,6 +101,10 @@ int sas_try_ata_reset(struct asd_sas_phy *phy);
void sas_hae_reset(struct work_struct *work);

void sas_free_device(struct kref *kref);
void sas_destruct_devices(struct asd_sas_port *port);

extern const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS];
extern const work_func_t sas_port_event_fns[PORT_NUM_EVENTS];

#ifdef CONFIG_SCSI_SAS_HOST_SMP
extern void sas_smp_host_handler(struct bsg_job *job, struct Scsi_Host *shost);
@@ -35,7 +35,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;

clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending);
phy->in_shutdown = 0;
phy->error = 0;
sas_deform_port(phy, 1);
}
@@ -45,7 +45,7 @@ static void sas_phye_oob_done(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;

clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending);
phy->in_shutdown = 0;
phy->error = 0;
}

@@ -58,8 +58,6 @@ static void sas_phye_oob_error(struct work_struct *work)
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);

clear_bit(PHYE_OOB_ERROR, &phy->phy_events_pending);

sas_deform_port(phy, 1);

if (!port && phy->enabled && i->dft->lldd_control_phy) {
@@ -88,8 +86,6 @@ static void sas_phye_spinup_hold(struct work_struct *work)
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);

clear_bit(PHYE_SPINUP_HOLD, &phy->phy_events_pending);

phy->error = 0;
i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
}
@@ -99,8 +95,6 @@ static void sas_phye_resume_timeout(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;

clear_bit(PHYE_RESUME_TIMEOUT, &phy->phy_events_pending);

/* phew, lldd got the phy back in the nick of time */
if (!phy->suspended) {
dev_info(&phy->phy->dev, "resume timeout cancelled\n");
@@ -113,45 +107,41 @@ static void sas_phye_resume_timeout(struct work_struct *work)
}


static void sas_phye_shutdown(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);

if (phy->enabled) {
int ret;

phy->error = 0;
phy->enabled = 0;
ret = i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL);
if (ret)
sas_printk("lldd disable phy%02d returned %d\n",
phy->id, ret);
} else
sas_printk("phy%02d is not enabled, cannot shutdown\n",
phy->id);
}

/* ---------- Phy class registration ---------- */

int sas_register_phys(struct sas_ha_struct *sas_ha)
{
int i;

static const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
[PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
[PHYE_OOB_DONE] = sas_phye_oob_done,
[PHYE_OOB_ERROR] = sas_phye_oob_error,
[PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
[PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout,

};

static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
[PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
[PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
[PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
[PORTE_TIMER_EVENT] = sas_porte_timer_event,
[PORTE_HARD_RESET] = sas_porte_hard_reset,
};

/* Now register the phys. */
for (i = 0; i < sas_ha->num_phys; i++) {
int k;
struct asd_sas_phy *phy = sas_ha->sas_phy[i];

phy->error = 0;
atomic_set(&phy->event_nr, 0);
INIT_LIST_HEAD(&phy->port_phy_el);
for (k = 0; k < PORT_NUM_EVENTS; k++) {
INIT_SAS_WORK(&phy->port_events[k].work, sas_port_event_fns[k]);
phy->port_events[k].phy = phy;
}

for (k = 0; k < PHY_NUM_EVENTS; k++) {
INIT_SAS_WORK(&phy->phy_events[k].work, sas_phy_event_fns[k]);
phy->phy_events[k].phy = phy;
}

phy->port = NULL;
phy->ha = sas_ha;
@@ -179,3 +169,12 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)

return 0;
}

const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
[PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
[PHYE_OOB_DONE] = sas_phye_oob_done,
[PHYE_OOB_ERROR] = sas_phye_oob_error,
[PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
[PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout,
[PHYE_SHUTDOWN] = sas_phye_shutdown,
};
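Moving the handler tables out of sas_register_phys() to file scope (and exporting them through sas_internal.h) lets the new *_event_worker() functions index them by event number. The dispatch idiom, reduced to a sketch with invented events:

#include <linux/workqueue.h>

enum example_event { EX_EV_RESET, EX_EV_TIMEOUT, EX_NUM_EVENTS };

static void ex_reset(struct work_struct *w) { }
static void ex_timeout(struct work_struct *w) { }

static const work_func_t ex_event_fns[EX_NUM_EVENTS] = {
	[EX_EV_RESET]   = ex_reset,
	[EX_EV_TIMEOUT] = ex_timeout,
};

static void ex_dispatch(enum example_event event, struct work_struct *w)
{
	ex_event_fns[event](w);	/* designated initializers keep the
				 * table in sync with the enum */
}
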
@@ -66,6 +66,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
rc = sas_notify_lldd_dev_found(dev);
if (rc) {
sas_unregister_dev(port, dev);
sas_destruct_devices(port);
continue;
}

@@ -192,6 +193,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
si->dft->lldd_port_formed(phy);

sas_discover_event(phy->port, DISCE_DISCOVER_DOMAIN);
flush_workqueue(sas_ha->disco_q);
}

/**
@@ -219,6 +221,7 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)

if (port->num_phys == 1) {
sas_unregister_domain_devices(port, gone);
sas_destruct_devices(port);
sas_port_delete(port->port);
port->port = NULL;
} else {
@@ -261,8 +264,6 @@ void sas_porte_bytes_dmaed(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;

clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending);

sas_form_port(phy);
}

@@ -273,14 +274,15 @@ void sas_porte_broadcast_rcvd(struct work_struct *work)
unsigned long flags;
u32 prim;

clear_bit(PORTE_BROADCAST_RCVD, &phy->port_events_pending);

spin_lock_irqsave(&phy->sas_prim_lock, flags);
prim = phy->sas_prim;
spin_unlock_irqrestore(&phy->sas_prim_lock, flags);

SAS_DPRINTK("broadcast received: %d\n", prim);
sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);

if (phy->port)
flush_workqueue(phy->port->ha->disco_q);
}

void sas_porte_link_reset_err(struct work_struct *work)
@@ -288,8 +290,6 @@ void sas_porte_link_reset_err(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;

clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending);

sas_deform_port(phy, 1);
}

@@ -298,8 +298,6 @@ void sas_porte_timer_event(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;

clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending);

sas_deform_port(phy, 1);
}

@@ -308,8 +306,6 @@ void sas_porte_hard_reset(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;

clear_bit(PORTE_HARD_RESET, &phy->port_events_pending);

sas_deform_port(phy, 1);
}

@@ -323,6 +319,7 @@ static void sas_init_port(struct asd_sas_port *port,
INIT_LIST_HEAD(&port->dev_list);
INIT_LIST_HEAD(&port->disco_list);
INIT_LIST_HEAD(&port->destroy_list);
INIT_LIST_HEAD(&port->sas_port_del_list);
spin_lock_init(&port->phy_list_lock);
INIT_LIST_HEAD(&port->phy_list);
port->ha = sas_ha;
@@ -353,3 +350,11 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha)
sas_deform_port(sas_ha->sas_phy[i], 0);

}

const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
[PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
[PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
[PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
[PORTE_TIMER_EVENT] = sas_porte_timer_event,
[PORTE_HARD_RESET] = sas_porte_hard_reset,
};
@@ -27,6 +27,7 @@
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/kernel.h>

#include "sas_internal.h"

@@ -959,21 +960,6 @@ void sas_target_destroy(struct scsi_target *starget)
sas_put_device(found_dev);
}

static void sas_parse_addr(u8 *sas_addr, const char *p)
{
int i;
for (i = 0; i < SAS_ADDR_SIZE; i++) {
u8 h, l;
if (!*p)
break;
h = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
p++;
l = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
p++;
sas_addr[i] = (h<<4) | l;
}
}

#define SAS_STRING_ADDR_SIZE 16

int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
@@ -990,7 +976,9 @@ int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
goto out;
}

sas_parse_addr(addr, fw->data);
res = hex2bin(addr, fw->data, strnlen(fw->data, SAS_ADDR_SIZE * 2) / 2);
if (res)
goto out;

out:
release_firmware(fw);
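The open-coded sas_parse_addr() loop is dropped in favor of the generic hex2bin() helper (hence the new <linux/kernel.h> include), which additionally rejects non-hex characters instead of decoding them into garbage. Usage in miniature, names invented:

#include <linux/kernel.h>	/* hex2bin() */

#define EX_ADDR_SIZE 8		/* a SAS address is 8 bytes */

static int ex_parse_addr(u8 *addr, const char *str)
{
	/* consumes 2 * EX_ADDR_SIZE hex digits; returns 0 on success,
	 * negative if any character is not a hex digit */
	return hex2bin(addr, str, EX_ADDR_SIZE);
}
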
@@ -55,9 +55,10 @@ struct lpfc_sli2_slim;
#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
#define LPFC_MIN_SG_SEG_CNT 32 /* sg element count per scsi cmnd */
#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */
#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */
#define LPFC_MAX_NVME_SEG_CNT 128 /* max SGL element cnt per NVME cmnd */
#define LPFC_MAX_NVME_SEG_CNT 256 /* max SGL element cnt per NVME cmnd */

#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
@@ -705,7 +706,6 @@ struct lpfc_hba {
* capability
*/
#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */
#define NVME_XRI_ABORT_EVENT 0x100000

uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -945,6 +945,8 @@ struct lpfc_hba {
struct list_head lpfc_nvme_buf_list_get;
struct list_head lpfc_nvme_buf_list_put;
uint32_t total_nvme_bufs;
uint32_t get_nvme_bufs;
uint32_t put_nvme_bufs;
struct list_head lpfc_iocb_list;
uint32_t total_iocbq_bufs;
struct list_head active_rrq_list;
@@ -148,6 +148,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
struct lpfc_nvmet_tgtport *tgtp;
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
struct lpfc_nodelist *ndlp;
struct nvme_fc_remote_port *nrport;
uint64_t data1, data2, data3, tot;
@@ -198,10 +199,15 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
}

len += snprintf(buf+len, PAGE_SIZE-len,
"LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n",
"LS: Xmt %08x Drop %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_ls_rsp),
atomic_read(&tgtp->xmt_ls_drop),
atomic_read(&tgtp->xmt_ls_rsp_cmpl),
atomic_read(&tgtp->xmt_ls_rsp_cmpl));

len += snprintf(buf + len, PAGE_SIZE - len,
"LS: RSP Abort %08x xb %08x Err %08x\n",
atomic_read(&tgtp->xmt_ls_rsp_aborted),
atomic_read(&tgtp->xmt_ls_rsp_xb_set),
atomic_read(&tgtp->xmt_ls_rsp_error));

len += snprintf(buf+len, PAGE_SIZE-len,
@@ -236,6 +242,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&tgtp->xmt_fcp_rsp_drop));

len += snprintf(buf+len, PAGE_SIZE-len,
"FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
atomic_read(&tgtp->xmt_fcp_rsp_aborted),
atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));

len += snprintf(buf + len, PAGE_SIZE - len,
"ABORT: Xmt %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_fcp_abort),
atomic_read(&tgtp->xmt_fcp_abort_cmpl));
@@ -271,6 +283,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
wwn_to_u64(vport->fc_portname.u.wwn));
return len;
}
lport = (struct lpfc_nvme_lport *)localport->private;
len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");

spin_lock_irq(shost->host_lock);
@@ -347,9 +360,16 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,

len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
len += snprintf(buf+len, PAGE_SIZE-len,
"LS: Xmt %016x Cmpl %016x\n",
"LS: Xmt %010x Cmpl %010x Abort %08x\n",
atomic_read(&phba->fc4NvmeLsRequests),
atomic_read(&phba->fc4NvmeLsCmpls));
atomic_read(&phba->fc4NvmeLsCmpls),
atomic_read(&lport->xmt_ls_abort));

len += snprintf(buf + len, PAGE_SIZE - len,
"LS XMIT: Err %08x CMPL: xb %08x Err %08x\n",
atomic_read(&lport->xmt_ls_err),
atomic_read(&lport->cmpl_ls_xb),
atomic_read(&lport->cmpl_ls_err));

tot = atomic_read(&phba->fc4NvmeIoCmpls);
data1 = atomic_read(&phba->fc4NvmeInputRequests);
@@ -360,8 +380,22 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
data1, data2, data3);

len += snprintf(buf+len, PAGE_SIZE-len,
" Cmpl %016llx Outstanding %016llx\n",
tot, (data1 + data2 + data3) - tot);
" noxri %08x nondlp %08x qdepth %08x "
"wqerr %08x\n",
atomic_read(&lport->xmt_fcp_noxri),
atomic_read(&lport->xmt_fcp_bad_ndlp),
atomic_read(&lport->xmt_fcp_qdepth),
atomic_read(&lport->xmt_fcp_wqerr));

len += snprintf(buf + len, PAGE_SIZE - len,
" Cmpl %016llx Outstanding %016llx Abort %08x\n",
tot, ((data1 + data2 + data3) - tot),
atomic_read(&lport->xmt_fcp_abort));

len += snprintf(buf + len, PAGE_SIZE - len,
"FCP CMPL: xb %08x Err %08x\n",
atomic_read(&lport->cmpl_fcp_xb),
atomic_read(&lport->cmpl_fcp_err));
return len;
}

@@ -3366,12 +3400,13 @@ LPFC_ATTR_R(suppress_rsp, 1, 0, 1,

/*
* lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
* lpfc_nvmet_mrq = 0 driver will calcualte optimal number of RQ pairs
* lpfc_nvmet_mrq = 1 use a single RQ pair
* lpfc_nvmet_mrq >= 2 use specified RQ pairs for MRQ
*
*/
LPFC_ATTR_R(nvmet_mrq,
1, 1, 16,
LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
"Specify number of RQ pairs for processing NVMET cmds");

/*
@@ -5139,7 +5174,7 @@ LPFC_ATTR(delay_discovery, 0, 0, 1,
* this parameter will be limited to 128 if BlockGuard is enabled under SLI4
* and will be limited to 512 if BlockGuard is enabled under SLI3.
*/
LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MIN_SG_SEG_CNT,
LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");

/*
@@ -6362,6 +6397,9 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
}

if (!phba->cfg_nvmet_mrq)
phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;

/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
if (phba->cfg_nvmet_mrq > phba->cfg_nvme_io_channel) {
phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
@@ -6369,10 +6407,13 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
"6018 Adjust lpfc_nvmet_mrq to %d\n",
phba->cfg_nvmet_mrq);
}
if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;

} else {
/* Not NVME Target mode. Turn off Target parameters. */
phba->nvmet_support = 0;
phba->cfg_nvmet_mrq = 0;
phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
phba->cfg_nvmet_fb_size = 0;
}

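The lpfc_nvmet_mrq changes replace the bare 1/16 bounds with named LPFC_NVMET_MRQ_* constants and add an explicit clamp in lpfc_nvme_mod_param_dep(), so an "auto" value follows the IO channel count but can never exceed the maximum. A generic sketch of that clamp-at-init pattern (all names invented, not lpfc's):

#include <linux/module.h>

static unsigned int ex_mrq;	/* 0 = auto */
module_param(ex_mrq, uint, 0444);
MODULE_PARM_DESC(ex_mrq, "RQ pairs for target commands (0 = auto)");

static void ex_mod_param_dep(unsigned int io_channels, unsigned int max)
{
	if (!ex_mrq)			/* auto: follow the channel count */
		ex_mrq = io_channels;
	if (ex_mrq > io_channels)	/* never exceed available channels */
		ex_mrq = io_channels;
	if (ex_mrq > max)		/* and respect the hard maximum */
		ex_mrq = max;
}
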
@@ -254,6 +254,8 @@ void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba,
struct lpfc_nvmet_ctxbuf *ctxp);
int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
struct fc_frame_header *fc_hdr);
void lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba);
void lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba);
void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
uint16_t);
int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
@@ -471,6 +471,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
"Parse GID_FTrsp: did:x%x flg:x%x x%x",
Did, ndlp->nlp_flag, vport->fc_flag);

ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
/* By default, the driver expects to support FCP FC4 */
if (fc4_type == FC_TYPE_FCP)
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
@@ -685,6 +686,25 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_els_flush_rscn(vport);
goto out;
}

spin_lock_irq(shost->host_lock);
if (vport->fc_flag & FC_RSCN_DEFERRED) {
vport->fc_flag &= ~FC_RSCN_DEFERRED;
spin_unlock_irq(shost->host_lock);

/*
* Skip processing the NS response
* Re-issue the NS cmd
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0151 Process Deferred RSCN Data: x%x x%x\n",
vport->fc_flag, vport->fc_rscn_id_cnt);
lpfc_els_handle_rscn(vport);

goto out;
}
spin_unlock_irq(shost->host_lock);

if (irsp->ulpStatus) {
/* Check for retry */
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
@@ -750,6 +750,8 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
struct lpfc_hba *phba = vport->phba;
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
uint64_t tot, data1, data2, data3;
int len = 0;
int cnt;
@@ -775,10 +777,15 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
}

len += snprintf(buf + len, size - len,
"LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n",
"LS: Xmt %08x Drop %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_ls_rsp),
atomic_read(&tgtp->xmt_ls_drop),
atomic_read(&tgtp->xmt_ls_rsp_cmpl),
atomic_read(&tgtp->xmt_ls_rsp_cmpl));

len += snprintf(buf + len, size - len,
"LS: RSP Abort %08x xb %08x Err %08x\n",
atomic_read(&tgtp->xmt_ls_rsp_aborted),
atomic_read(&tgtp->xmt_ls_rsp_xb_set),
atomic_read(&tgtp->xmt_ls_rsp_error));

len += snprintf(buf + len, size - len,
@@ -811,6 +818,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
atomic_read(&tgtp->xmt_fcp_rsp_error),
atomic_read(&tgtp->xmt_fcp_rsp_drop));

len += snprintf(buf + len, size - len,
"FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
atomic_read(&tgtp->xmt_fcp_rsp_aborted),
atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));

len += snprintf(buf + len, size - len,
"ABORT: Xmt %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_fcp_abort),
@@ -885,8 +898,38 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
data1, data2, data3);

len += snprintf(buf + len, size - len,
" Cmpl %016llx Outstanding %016llx\n",
" Cmpl %016llx Outstanding %016llx\n",
tot, (data1 + data2 + data3) - tot);

localport = vport->localport;
if (!localport)
return len;
lport = (struct lpfc_nvme_lport *)localport->private;
if (!lport)
return len;

len += snprintf(buf + len, size - len,
"LS Xmt Err: Abrt %08x Err %08x "
"Cmpl Err: xb %08x Err %08x\n",
atomic_read(&lport->xmt_ls_abort),
atomic_read(&lport->xmt_ls_err),
atomic_read(&lport->cmpl_ls_xb),
atomic_read(&lport->cmpl_ls_err));

len += snprintf(buf + len, size - len,
"FCP Xmt Err: noxri %06x nondlp %06x "
"qdepth %06x wqerr %06x Abrt %06x\n",
atomic_read(&lport->xmt_fcp_noxri),
atomic_read(&lport->xmt_fcp_bad_ndlp),
atomic_read(&lport->xmt_fcp_qdepth),
atomic_read(&lport->xmt_fcp_wqerr),
atomic_read(&lport->xmt_fcp_abort));

len += snprintf(buf + len, size - len,
"FCP Cmpl Err: xb %08x Err %08x\n",
atomic_read(&lport->cmpl_fcp_xb),
atomic_read(&lport->cmpl_fcp_err));

}

return len;
@@ -3213,7 +3256,7 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
return 1;
}

if (eqidx < phba->cfg_nvmet_mrq) {
if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
/* NVMET CQset */
qp = phba->sli4_hba.nvmet_cqset[eqidx];
*len = __lpfc_idiag_print_cq(qp, "NVMET CQset", pbuffer, *len);
@@ -3246,7 +3289,7 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,

len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"\n%s EQ info: EQ-STAT[max:x%x noE:x%x "
"bs:x%x proc:x%llx eqd %d]\n",
"cqe_proc:x%x eqe_proc:x%llx eqd %d]\n",
eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3,
(unsigned long long)qp->q_cnt_4, qp->q_mode);
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
@@ -3366,6 +3409,12 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
if (len >= max_cnt)
goto too_big;

qp = phba->sli4_hba.hdr_rq;
len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.dat_rq,
"ELS RQpair", pbuffer, len);
if (len >= max_cnt)
goto too_big;

/* Slow-path NVME LS response CQ */
qp = phba->sli4_hba.nvmels_cq;
len = __lpfc_idiag_print_cq(qp, "NVME LS",
@@ -3383,12 +3432,6 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
if (len >= max_cnt)
goto too_big;

qp = phba->sli4_hba.hdr_rq;
len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.dat_rq,
"RQpair", pbuffer, len);
if (len >= max_cnt)
goto too_big;

goto out;
}

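All of these statistics dumps rely on the same accumulation idiom: each print appends at buf + len and is bounded by the space that remains, so the page-sized buffer cannot overrun. In miniature (scnprintf() is the variant that returns what was actually written, which keeps the arithmetic safe even when the buffer fills):

#include <linux/kernel.h>

static int ex_fill_stats(char *buf, int size, u32 reqs, u32 cmpls)
{
	int len = 0;

	len += scnprintf(buf + len, size - len, "reqs  %08x\n", reqs);
	len += scnprintf(buf + len, size - len, "cmpls %08x\n", cmpls);
	return len;	/* bytes written, never more than size */
}
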
@@ -134,6 +134,8 @@ struct lpfc_nodelist {
struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
uint32_t fc4_prli_sent;
uint32_t upcall_flags;
#define NLP_WAIT_FOR_UNREG 0x1

uint32_t nvme_fb_size; /* NVME target's supported byte cnt */
#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */
};
@@ -858,6 +858,9 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
vport->fc_flag |= FC_PT2PT;
spin_unlock_irq(shost->host_lock);

/* If we are pt2pt with another NPort, force NPIV off! */
phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
lpfc_unregister_fcf_prep(phba);
@@ -916,28 +919,29 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
} else

mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
goto fail;

lpfc_config_link(phba, mbox);

mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
mbox->vport = vport;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
goto fail;
}
} else {
/* This side will wait for the PLOGI, decrement ndlp reference
* count indicating that ndlp can be released when other
* references to it are done.
*/
lpfc_nlp_put(ndlp);

/* If we are pt2pt with another NPort, force NPIV off! */
phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
goto fail;

lpfc_config_link(phba, mbox);

mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
mbox->vport = vport;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
goto fail;
/* Start discovery - this should just do CLEAR_LA */
lpfc_disc_start(vport);
}

return 0;
@@ -1030,30 +1034,31 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,

stop_rr_fcf_flogi:
/* FLOGI failure */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"2858 FLOGI failure Status:x%x/x%x TMO:x%x "
"Data x%x x%x\n",
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout, phba->hba_flag,
phba->fcf.fcf_flag);
if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
IOERR_LOOP_OPEN_FAILURE)))
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"2858 FLOGI failure Status:x%x/x%x "
"TMO:x%x Data x%x x%x\n",
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout, phba->hba_flag,
phba->fcf.fcf_flag);

/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;

/* FLOGI failure */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout);


/* If this is not a loop open failure, bail out */
if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
IOERR_LOOP_OPEN_FAILURE)))
goto flogifail;

lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
"0150 FLOGI failure Status:x%x/x%x TMO:x%x\n",
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout);

/* FLOGI failed, so there is no fabric */
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -1670,6 +1675,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,

/* Two ndlps cannot have the same did on the nodelist */
ndlp->nlp_DID = keepDID;
lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
if (phba->sli_rev == LPFC_SLI_REV4 &&
active_rrqs_xri_bitmap)
memcpy(ndlp->active_rrqs_xri_bitmap,
@@ -2088,6 +2094,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_PRLI_SND;

/* Driver supports multiple FC4 types. Counters matter. */
vport->fc_prli_sent--;
ndlp->fc4_prli_sent--;
spin_unlock_irq(shost->host_lock);

lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@@ -2095,9 +2105,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4],
ndlp->nlp_DID);

/* Ddriver supports multiple FC4 types. Counters matter. */
vport->fc_prli_sent--;

/* PRLI completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0103 PRLI completes to NPort x%06x "
@@ -2111,7 +2118,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,

if (irsp->ulpStatus) {
/* Check for retry */
ndlp->fc4_prli_sent--;
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
goto out;
@@ -2190,6 +2196,15 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
local_nlp_type = ndlp->nlp_fc4_type;

/* This routine will issue 1 or 2 PRLIs, so zero all the ndlp
* fields here before any of them can complete.
*/
ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
ndlp->nlp_flag &= ~NLP_FIRSTBURST;
ndlp->nvme_fb_size = 0;

send_next_prli:
if (local_nlp_type & NLP_FC4_FCP) {
/* Payload is 4 + 16 = 20 x14 bytes. */
@@ -2298,6 +2313,13 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_PRLI_SND;

/* The vport counters are used for lpfc_scan_finished, but
* the ndlp is used to track outstanding PRLIs for different
* FC4 types.
*/
vport->fc_prli_sent++;
ndlp->fc4_prli_sent++;
spin_unlock_irq(shost->host_lock);
if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
IOCB_ERROR) {
@@ -2308,12 +2330,6 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 1;
}

/* The vport counters are used for lpfc_scan_finished, but
* the ndlp is used to track outstanding PRLIs for different
* FC4 types.
*/
vport->fc_prli_sent++;
ndlp->fc4_prli_sent++;

/* The driver supports 2 FC4 types. Make sure
* a PRLI is issued for all types before exiting.
@@ -2951,8 +2967,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
/* This will cause the callback-function lpfc_cmpl_els_cmd to
* trigger the release of node.
*/

lpfc_nlp_put(ndlp);
if (!(vport->fc_flag & FC_PT2PT))
lpfc_nlp_put(ndlp);
return 0;
}

@@ -6172,9 +6188,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
/* send RECOVERY event for ALL nodes that match RSCN payload */
lpfc_rscn_recovery_check(vport);
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_RSCN_DEFERRED;
spin_unlock_irq(shost->host_lock);
return 0;
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -6849,7 +6862,7 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 1;

pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t); /* Skip past command */

/* use the command's xri in the response */
@@ -8060,13 +8073,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
rjt_exp = LSEXP_NOTHING_MORE;
break;
}

/* NVMET accepts NVME PRLI only. Reject FCP PRLI */
if (cmd == ELS_CMD_PRLI && phba->nvmet_support) {
rjt_err = LSRJT_CMD_UNSUPPORTED;
rjt_exp = LSEXP_REQ_UNSUPPORTED;
break;
}
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
break;
case ELS_CMD_LIRR:
@@ -8149,9 +8155,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_nlp_put(ndlp);
break;
case ELS_CMD_REC:
/* receive this due to exchange closed */
rjt_err = LSRJT_UNABLE_TPC;
rjt_exp = LSEXP_INVALID_OX_RX;
/* receive this due to exchange closed */
rjt_err = LSRJT_UNABLE_TPC;
rjt_exp = LSEXP_INVALID_OX_RX;
break;
default:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@ -640,8 +640,6 @@ lpfc_work_done(struct lpfc_hba *phba)
                lpfc_handle_rrq_active(phba);
        if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
                lpfc_sli4_fcp_xri_abort_event_proc(phba);
        if (phba->hba_flag & NVME_XRI_ABORT_EVENT)
                lpfc_sli4_nvme_xri_abort_event_proc(phba);
        if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
                lpfc_sli4_els_xri_abort_event_proc(phba);
        if (phba->hba_flag & ASYNC_EVENT)

@ -4178,12 +4176,14 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,

                if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
                        vport->phba->nport_event_cnt++;
                        if (vport->phba->nvmet_support == 0)
                                /* Start devloss */
                                lpfc_nvme_unregister_port(vport, ndlp);
                        else
                        if (vport->phba->nvmet_support == 0) {
                                /* Start devloss if target. */
                                if (ndlp->nlp_type & NLP_NVME_TARGET)
                                        lpfc_nvme_unregister_port(vport, ndlp);
                        } else {
                                /* NVMET has no upcall. */
                                lpfc_nlp_put(ndlp);
                        }
                }
        }

@ -4207,11 +4207,13 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                    ndlp->nlp_fc4_type & NLP_FC4_NVME) {
                        if (vport->phba->nvmet_support == 0) {
                                /* Register this rport with the transport.
                                 * Initiators take the NDLP ref count in
                                 * the register.
                                 * Only NVME Target Rports are registered with
                                 * the transport.
                                 */
                                vport->phba->nport_event_cnt++;
                                lpfc_nvme_register_port(vport, ndlp);
                                if (ndlp->nlp_type & NLP_NVME_TARGET) {
                                        vport->phba->nport_event_cnt++;
                                        lpfc_nvme_register_port(vport, ndlp);
                                }
                        } else {
                                /* Just take an NDLP ref count since the
                                 * target does not register rports.

@ -5838,9 +5840,12 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
                if (filter(ndlp, param)) {
                        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
                                         "3185 FIND node filter %p DID "
                                         "Data: x%p x%x x%x\n",
                                         "ndlp %p did x%x flg x%x st x%x "
                                         "xri x%x type x%x rpi x%x\n",
                                         filter, ndlp, ndlp->nlp_DID,
                                         ndlp->nlp_flag);
                                         ndlp->nlp_flag, ndlp->nlp_state,
                                         ndlp->nlp_xri, ndlp->nlp_type,
                                         ndlp->nlp_rpi);
                        return ndlp;
                }
        }

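The __lpfc_find_node() hunk above only widens the trace message, but it sits on top of a filter-callback search: the caller passes a predicate plus an opaque parameter and gets back the first matching node. A minimal standalone sketch of that pattern in C (hypothetical node type and names, not the driver's own structures):

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for struct lpfc_nodelist. */
struct node {
        unsigned int did;       /* destination ID */
        struct node *next;
};

/* Same shape as the driver's node_filter callback type. */
typedef bool (*node_filter)(struct node *n, void *param);

/* Return the first node the filter accepts, or NULL. */
static struct node *find_node(struct node *head, node_filter filter,
                              void *param)
{
        struct node *n;

        for (n = head; n; n = n->next)
                if (filter(n, param))
                        return n;
        return NULL;
}

/* Example predicate: match on DID, as the driver's DID lookup does. */
static bool match_did(struct node *n, void *param)
{
        return n->did == *(unsigned int *)param;
}

One walk routine then serves every lookup; the patch simply makes the trace line emitted on a match dump more of the node's state (state, xri, type, rpi).
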
@ -1122,6 +1122,7 @@ struct cq_context {
#define LPFC_CQ_CNT_256 0x0
#define LPFC_CQ_CNT_512 0x1
#define LPFC_CQ_CNT_1024 0x2
#define LPFC_CQ_CNT_WORD7 0x3
        uint32_t word1;
#define lpfc_cq_eq_id_SHIFT 22 /* Version 0 Only */
#define lpfc_cq_eq_id_MASK 0x000000FF

@ -1129,7 +1130,7 @@ struct cq_context {
#define lpfc_cq_eq_id_2_SHIFT 0 /* Version 2 Only */
#define lpfc_cq_eq_id_2_MASK 0x0000FFFF
#define lpfc_cq_eq_id_2_WORD word1
        uint32_t reserved0;
        uint32_t lpfc_cq_context_count; /* Version 2 Only */
        uint32_t reserved1;
};

@ -1193,6 +1194,9 @@ struct lpfc_mbx_cq_create_set {
#define lpfc_mbx_cq_create_set_arm_SHIFT 31
#define lpfc_mbx_cq_create_set_arm_MASK 0x00000001
#define lpfc_mbx_cq_create_set_arm_WORD word2
#define lpfc_mbx_cq_create_set_cq_cnt_SHIFT 16
#define lpfc_mbx_cq_create_set_cq_cnt_MASK 0x00007FFF
#define lpfc_mbx_cq_create_set_cq_cnt_WORD word2
#define lpfc_mbx_cq_create_set_num_cq_SHIFT 0
#define lpfc_mbx_cq_create_set_num_cq_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_num_cq_WORD word2

@ -1034,6 +1034,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
        LIST_HEAD(nvmet_aborts);
        unsigned long iflag = 0;
        struct lpfc_sglq *sglq_entry = NULL;
        int cnt;

        lpfc_sli_hbqbuf_free_all(phba);

@ -1090,11 +1091,14 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
        spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);

        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                cnt = 0;
                list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
                        psb->pCmd = NULL;
                        psb->status = IOSTAT_SUCCESS;
                        cnt++;
                }
                spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
                phba->put_nvme_bufs += cnt;
                list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
                spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);

@ -3339,6 +3343,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                 &phba->lpfc_nvme_buf_list_put, list) {
                list_del(&lpfc_ncmd->list);
                phba->put_nvme_bufs--;
                dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
                              lpfc_ncmd->dma_handle);
                kfree(lpfc_ncmd);

@ -3350,6 +3355,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                 &phba->lpfc_nvme_buf_list_get, list) {
                list_del(&lpfc_ncmd->list);
                phba->get_nvme_bufs--;
                dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
                              lpfc_ncmd->dma_handle);
                kfree(lpfc_ncmd);

@ -3754,9 +3760,11 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
        uint16_t i, lxri, els_xri_cnt;
        uint16_t nvme_xri_cnt, nvme_xri_max;
        LIST_HEAD(nvme_sgl_list);
        int rc;
        int rc, cnt;

        phba->total_nvme_bufs = 0;
        phba->get_nvme_bufs = 0;
        phba->put_nvme_bufs = 0;

        if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
                return 0;

@ -3780,6 +3788,9 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
        spin_lock(&phba->nvme_buf_list_put_lock);
        list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
        list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
        cnt = phba->get_nvme_bufs + phba->put_nvme_bufs;
        phba->get_nvme_bufs = 0;
        phba->put_nvme_bufs = 0;
        spin_unlock(&phba->nvme_buf_list_put_lock);
        spin_unlock_irq(&phba->nvme_buf_list_get_lock);

@ -3824,6 +3835,7 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
        spin_lock_irq(&phba->nvme_buf_list_get_lock);
        spin_lock(&phba->nvme_buf_list_put_lock);
        list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
        phba->get_nvme_bufs = cnt;
        INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
        spin_unlock(&phba->nvme_buf_list_put_lock);
        spin_unlock_irq(&phba->nvme_buf_list_get_lock);

@ -5609,8 +5621,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
                /* Initialize the NVME buffer list used by driver for NVME IO */
                spin_lock_init(&phba->nvme_buf_list_get_lock);
                INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
                phba->get_nvme_bufs = 0;
                spin_lock_init(&phba->nvme_buf_list_put_lock);
                INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
                phba->put_nvme_bufs = 0;
        }

        /* Initialize the fabric iocb list */

@ -5806,6 +5820,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        struct lpfc_mqe *mqe;
        int longs;
        int fof_vectors = 0;
        int extra;
        uint64_t wwn;

        phba->sli4_hba.num_online_cpu = num_online_cpus();

@ -5859,14 +5874,22 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
         * The WQ create will allocate the ring.
         */

        /*
         * 1 for cmd, 1 for rsp, NVME adds an extra one
         * for boundary conditions in its max_sgl_segment template.
         */
        extra = 2;
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
                extra++;

        /*
         * It doesn't matter what family our adapter is in, we are
         * limited to 2 Pages, 512 SGEs, for our SGL.
         * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
         */
        max_buf_size = (2 * SLI4_PAGE_SIZE);
        if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
                phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
        if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
                phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;

        /*
         * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size

@ -5899,14 +5922,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                 */
                phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
                                sizeof(struct fcp_rsp) +
                                ((phba->cfg_sg_seg_cnt + 2) *
                                ((phba->cfg_sg_seg_cnt + extra) *
                                sizeof(struct sli4_sge));

                /* Total SGEs for scsi_sg_list */
                phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
                phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;

                /*
                 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only
                 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
                 * need to post 1 page for the SGL.
                 */
        }

@ -5947,9 +5970,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
                INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
                INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);

                /* Fast-path XRI aborted CQ Event work queue list */
                INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
        }

        /* This abort list used by worker thread */

@ -7936,8 +7956,12 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
                phba->cfg_fcp_io_channel = io_channel;
        if (phba->cfg_nvme_io_channel > io_channel)
                phba->cfg_nvme_io_channel = io_channel;
        if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
                phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
        if (phba->nvmet_support) {
                if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
                        phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
        }
        if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
                phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;

        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",

@ -7958,10 +7982,10 @@ static int
lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
{
        struct lpfc_queue *qdesc;
        int cnt;

        qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
                                      phba->sli4_hba.cq_ecount);
        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
                                      phba->sli4_hba.cq_esize,
                                      LPFC_CQE_EXP_COUNT);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0508 Failed allocate fast-path NVME CQ (%d)\n",

@ -7970,8 +7994,8 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
        }
        phba->sli4_hba.nvme_cq[wqidx] = qdesc;

        cnt = LPFC_NVME_WQSIZE;
        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_WQE128_SIZE, cnt);
        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
                                      LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0509 Failed allocate fast-path NVME WQ (%d)\n",

@ -7987,11 +8011,18 @@ static int
lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
{
        struct lpfc_queue *qdesc;
        uint32_t wqesize;

        /* Create Fast Path FCP CQs */
        qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
                                      phba->sli4_hba.cq_ecount);
        if (phba->fcp_embed_io)
                /* Increase the CQ size when WQEs contain an embedded cdb */
                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
                                              phba->sli4_hba.cq_esize,
                                              LPFC_CQE_EXP_COUNT);

        else
                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
                                              phba->sli4_hba.cq_esize,
                                              phba->sli4_hba.cq_ecount);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);

@ -8000,9 +8031,15 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
        phba->sli4_hba.fcp_cq[wqidx] = qdesc;

        /* Create Fast Path FCP WQs */
        wqesize = (phba->fcp_embed_io) ?
                LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
        qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount);
        if (phba->fcp_embed_io)
                /* Increase the WQ size when WQEs contain an embedded cdb */
                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
                                              LPFC_WQE128_SIZE,
                                              LPFC_WQE_EXP_COUNT);
        else
                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
                                              phba->sli4_hba.wq_esize,
                                              phba->sli4_hba.wq_ecount);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0503 Failed allocate fast-path FCP WQ (%d)\n",

|
||||
/* Create HBA Event Queues (EQs) */
|
||||
for (idx = 0; idx < io_channel; idx++) {
|
||||
/* Create EQs */
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
|
||||
phba->sli4_hba.eq_esize,
|
||||
phba->sli4_hba.eq_ecount);
|
||||
if (!qdesc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
@ -8196,8 +8234,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
||||
if (phba->nvmet_support) {
|
||||
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
|
||||
qdesc = lpfc_sli4_queue_alloc(phba,
|
||||
phba->sli4_hba.cq_esize,
|
||||
phba->sli4_hba.cq_ecount);
|
||||
LPFC_DEFAULT_PAGE_SIZE,
|
||||
phba->sli4_hba.cq_esize,
|
||||
phba->sli4_hba.cq_ecount);
|
||||
if (!qdesc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3142 Failed allocate NVME "
|
||||
@ -8213,7 +8252,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
||||
*/
|
||||
|
||||
/* Create slow-path Mailbox Command Complete Queue */
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
|
||||
phba->sli4_hba.cq_esize,
|
||||
phba->sli4_hba.cq_ecount);
|
||||
if (!qdesc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
@ -8223,7 +8263,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
||||
phba->sli4_hba.mbx_cq = qdesc;
|
||||
|
||||
/* Create slow-path ELS Complete Queue */
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
|
||||
phba->sli4_hba.cq_esize,
|
||||
phba->sli4_hba.cq_ecount);
|
||||
if (!qdesc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
@ -8239,7 +8280,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
||||
|
||||
/* Create Mailbox Command Queue */
|
||||
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
|
||||
phba->sli4_hba.mq_esize,
|
||||
phba->sli4_hba.mq_ecount);
|
||||
if (!qdesc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
@ -8253,7 +8295,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
||||
*/
|
||||
|
||||
/* Create slow-path ELS Work Queue */
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
|
||||
phba->sli4_hba.wq_esize,
|
||||
phba->sli4_hba.wq_ecount);
|
||||
if (!qdesc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
@ -8265,7 +8308,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
||||
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
|
||||
/* Create NVME LS Complete Queue */
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
|
||||
phba->sli4_hba.cq_esize,
|
||||
phba->sli4_hba.cq_ecount);
|
||||
if (!qdesc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
@ -8275,7 +8319,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
||||
phba->sli4_hba.nvmels_cq = qdesc;
|
||||
|
||||
/* Create NVME LS Work Queue */
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
|
||||
phba->sli4_hba.wq_esize,
|
||||
phba->sli4_hba.wq_ecount);
|
||||
if (!qdesc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
@ -8291,7 +8336,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
||||
*/
|
||||
|
||||
/* Create Receive Queue for header */
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
|
||||
phba->sli4_hba.rq_esize,
|
||||
phba->sli4_hba.rq_ecount);
|
||||
if (!qdesc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
@ -8301,7 +8347,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
||||
phba->sli4_hba.hdr_rq = qdesc;
|
||||
|
||||
/* Create Receive Queue for data */
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
|
||||
phba->sli4_hba.rq_esize,
|
||||
phba->sli4_hba.rq_ecount);
|
||||
if (!qdesc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
@ -8314,6 +8361,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
||||
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
|
||||
/* Create NVMET Receive Queue for header */
|
||||
qdesc = lpfc_sli4_queue_alloc(phba,
|
||||
LPFC_DEFAULT_PAGE_SIZE,
|
||||
phba->sli4_hba.rq_esize,
|
||||
LPFC_NVMET_RQE_DEF_COUNT);
|
||||
if (!qdesc) {
|
||||
@ -8339,6 +8387,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
||||
|
||||
/* Create NVMET Receive Queue for data */
|
||||
qdesc = lpfc_sli4_queue_alloc(phba,
|
||||
LPFC_DEFAULT_PAGE_SIZE,
|
||||
phba->sli4_hba.rq_esize,
|
||||
LPFC_NVMET_RQE_DEF_COUNT);
|
||||
if (!qdesc) {
|
||||
@ -8437,13 +8486,15 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
|
||||
/* Release NVME CQ mapping array */
|
||||
lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
|
||||
|
||||
lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
|
||||
phba->cfg_nvmet_mrq);
|
||||
if (phba->nvmet_support) {
|
||||
lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
|
||||
phba->cfg_nvmet_mrq);
|
||||
|
||||
lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
|
||||
phba->cfg_nvmet_mrq);
|
||||
lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
|
||||
phba->cfg_nvmet_mrq);
|
||||
lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
|
||||
phba->cfg_nvmet_mrq);
|
||||
lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
|
||||
phba->cfg_nvmet_mrq);
|
||||
}
|
||||
|
||||
/* Release mailbox command work queue */
|
||||
__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
|
||||
@ -8514,6 +8565,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
|
||||
qidx, (uint32_t)rc);
|
||||
return rc;
|
||||
}
|
||||
cq->chann = qidx;
|
||||
|
||||
if (qtype != LPFC_MBOX) {
|
||||
/* Setup nvme_cq_map for fast lookup */
|
||||
@ -8533,6 +8585,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
|
||||
/* no need to tear down cq - caller will do so */
|
||||
return rc;
|
||||
}
|
||||
wq->chann = qidx;
|
||||
|
||||
/* Bind this CQ/WQ to the NVME ring */
|
||||
pring = wq->pring;
|
||||
@ -8773,6 +8826,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
||||
"rc = 0x%x\n", (uint32_t)rc);
|
||||
goto out_destroy;
|
||||
}
|
||||
phba->sli4_hba.nvmet_cqset[0]->chann = 0;
|
||||
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
||||
"6090 NVMET CQ setup: cq-id=%d, "
|
||||
"parent eq-id=%d\n",
|
||||
@ -8994,19 +9049,22 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
|
||||
for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
|
||||
lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
|
||||
|
||||
/* Unset NVMET MRQ queue */
|
||||
if (phba->sli4_hba.nvmet_mrq_hdr) {
|
||||
for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
|
||||
lpfc_rq_destroy(phba,
|
||||
if (phba->nvmet_support) {
|
||||
/* Unset NVMET MRQ queue */
|
||||
if (phba->sli4_hba.nvmet_mrq_hdr) {
|
||||
for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
|
||||
lpfc_rq_destroy(
|
||||
phba,
|
||||
phba->sli4_hba.nvmet_mrq_hdr[qidx],
|
||||
phba->sli4_hba.nvmet_mrq_data[qidx]);
|
||||
}
|
||||
}
|
||||
|
||||
/* Unset NVMET CQ Set complete queue */
|
||||
if (phba->sli4_hba.nvmet_cqset) {
|
||||
for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
|
||||
lpfc_cq_destroy(phba,
|
||||
phba->sli4_hba.nvmet_cqset[qidx]);
|
||||
/* Unset NVMET CQ Set complete queue */
|
||||
if (phba->sli4_hba.nvmet_cqset) {
|
||||
for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
|
||||
lpfc_cq_destroy(
|
||||
phba, phba->sli4_hba.nvmet_cqset[qidx]);
|
||||
}
|
||||
}
|
||||
|
||||
/* Unset FCP response complete queue */
|
||||
@ -9175,11 +9233,6 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
|
||||
/* Pending ELS XRI abort events */
|
||||
list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
|
||||
&cqelist);
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
|
||||
/* Pending NVME XRI abort events */
|
||||
list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
|
||||
&cqelist);
|
||||
}
|
||||
/* Pending asynnc events */
|
||||
list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
|
||||
&cqelist);
|
||||
@ -9421,44 +9474,62 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
|
||||
lpfc_sli4_bar0_register_memmap(phba, if_type);
|
||||
}
|
||||
|
||||
if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
|
||||
(pci_resource_start(pdev, PCI_64BIT_BAR2))) {
|
||||
/*
|
||||
* Map SLI4 if type 0 HBA Control Register base to a kernel
|
||||
* virtual address and setup the registers.
|
||||
*/
|
||||
phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
|
||||
bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
|
||||
phba->sli4_hba.ctrl_regs_memmap_p =
|
||||
ioremap(phba->pci_bar1_map, bar1map_len);
|
||||
if (!phba->sli4_hba.ctrl_regs_memmap_p) {
|
||||
dev_printk(KERN_ERR, &pdev->dev,
|
||||
"ioremap failed for SLI4 HBA control registers.\n");
|
||||
if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
|
||||
if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
|
||||
/*
|
||||
* Map SLI4 if type 0 HBA Control Register base to a
|
||||
* kernel virtual address and setup the registers.
|
||||
*/
|
||||
phba->pci_bar1_map = pci_resource_start(pdev,
|
||||
PCI_64BIT_BAR2);
|
||||
bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
|
||||
phba->sli4_hba.ctrl_regs_memmap_p =
|
||||
ioremap(phba->pci_bar1_map,
|
||||
bar1map_len);
|
||||
if (!phba->sli4_hba.ctrl_regs_memmap_p) {
|
||||
dev_err(&pdev->dev,
|
||||
"ioremap failed for SLI4 HBA "
|
||||
"control registers.\n");
|
||||
error = -ENOMEM;
|
||||
goto out_iounmap_conf;
|
||||
}
|
||||
phba->pci_bar2_memmap_p =
|
||||
phba->sli4_hba.ctrl_regs_memmap_p;
|
||||
lpfc_sli4_bar1_register_memmap(phba);
|
||||
} else {
|
||||
error = -ENOMEM;
|
||||
goto out_iounmap_conf;
|
||||
}
|
||||
phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
|
||||
lpfc_sli4_bar1_register_memmap(phba);
|
||||
}
|
||||
|
||||
if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
|
||||
(pci_resource_start(pdev, PCI_64BIT_BAR4))) {
|
||||
/*
|
||||
* Map SLI4 if type 0 HBA Doorbell Register base to a kernel
|
||||
* virtual address and setup the registers.
|
||||
*/
|
||||
phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
|
||||
bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
|
||||
phba->sli4_hba.drbl_regs_memmap_p =
|
||||
ioremap(phba->pci_bar2_map, bar2map_len);
|
||||
if (!phba->sli4_hba.drbl_regs_memmap_p) {
|
||||
dev_printk(KERN_ERR, &pdev->dev,
|
||||
"ioremap failed for SLI4 HBA doorbell registers.\n");
|
||||
goto out_iounmap_ctrl;
|
||||
}
|
||||
phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
|
||||
error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
|
||||
if (error)
|
||||
if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
|
||||
if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
|
||||
/*
|
||||
* Map SLI4 if type 0 HBA Doorbell Register base to
|
||||
* a kernel virtual address and setup the registers.
|
||||
*/
|
||||
phba->pci_bar2_map = pci_resource_start(pdev,
|
||||
PCI_64BIT_BAR4);
|
||||
bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
|
||||
phba->sli4_hba.drbl_regs_memmap_p =
|
||||
ioremap(phba->pci_bar2_map,
|
||||
bar2map_len);
|
||||
if (!phba->sli4_hba.drbl_regs_memmap_p) {
|
||||
dev_err(&pdev->dev,
|
||||
"ioremap failed for SLI4 HBA"
|
||||
" doorbell registers.\n");
|
||||
error = -ENOMEM;
|
||||
goto out_iounmap_ctrl;
|
||||
}
|
||||
phba->pci_bar4_memmap_p =
|
||||
phba->sli4_hba.drbl_regs_memmap_p;
|
||||
error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
|
||||
if (error)
|
||||
goto out_iounmap_all;
|
||||
} else {
|
||||
error = -ENOMEM;
|
||||
goto out_iounmap_all;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -10093,6 +10164,16 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
|
||||
int fcp_xri_cmpl = 1;
|
||||
int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
|
||||
|
||||
/* Driver just aborted IOs during the hba_unset process. Pause
|
||||
* here to give the HBA time to complete the IO and get entries
|
||||
* into the abts lists.
|
||||
*/
|
||||
msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
|
||||
|
||||
/* Wait for NVME pending IO to flush back to transport. */
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
|
||||
lpfc_nvme_wait_for_io_drain(phba);
|
||||
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
|
||||
fcp_xri_cmpl =
|
||||
list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
|
||||
@ -10369,7 +10450,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
!phba->nvme_support) {
|
||||
phba->nvme_support = 0;
|
||||
phba->nvmet_support = 0;
|
||||
phba->cfg_nvmet_mrq = 0;
|
||||
phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
|
||||
phba->cfg_nvme_io_channel = 0;
|
||||
phba->io_channel_irqs = phba->cfg_fcp_io_channel;
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
|
||||
@ -11616,6 +11697,10 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
|
||||
/* Flush all driver's outstanding SCSI I/Os as we are to reset */
|
||||
lpfc_sli_flush_fcp_rings(phba);
|
||||
|
||||
/* Flush the outstanding NVME IOs if fc4 type enabled. */
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
|
||||
lpfc_sli_flush_nvme_rings(phba);
|
||||
|
||||
/* stop all timers */
|
||||
lpfc_stop_hba_timers(phba);
|
||||
|
||||
@ -11647,6 +11732,10 @@ lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
|
||||
|
||||
/* Clean up all driver's outstanding SCSI I/Os */
|
||||
lpfc_sli_flush_fcp_rings(phba);
|
||||
|
||||
/* Flush the outstanding NVME IOs if fc4 type enabled. */
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
|
||||
lpfc_sli_flush_nvme_rings(phba);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -12138,10 +12227,10 @@ int
|
||||
lpfc_fof_queue_create(struct lpfc_hba *phba)
|
||||
{
|
||||
struct lpfc_queue *qdesc;
|
||||
uint32_t wqesize;
|
||||
|
||||
/* Create FOF EQ */
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
|
||||
phba->sli4_hba.eq_esize,
|
||||
phba->sli4_hba.eq_ecount);
|
||||
if (!qdesc)
|
||||
goto out_error;
|
||||
@ -12151,7 +12240,15 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
|
||||
if (phba->cfg_fof) {
|
||||
|
||||
/* Create OAS CQ */
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
|
||||
if (phba->fcp_embed_io)
|
||||
qdesc = lpfc_sli4_queue_alloc(phba,
|
||||
LPFC_EXPANDED_PAGE_SIZE,
|
||||
phba->sli4_hba.cq_esize,
|
||||
LPFC_CQE_EXP_COUNT);
|
||||
else
|
||||
qdesc = lpfc_sli4_queue_alloc(phba,
|
||||
LPFC_DEFAULT_PAGE_SIZE,
|
||||
phba->sli4_hba.cq_esize,
|
||||
phba->sli4_hba.cq_ecount);
|
||||
if (!qdesc)
|
||||
goto out_error;
|
||||
@ -12159,11 +12256,16 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
|
||||
phba->sli4_hba.oas_cq = qdesc;
|
||||
|
||||
/* Create OAS WQ */
|
||||
wqesize = (phba->fcp_embed_io) ?
|
||||
LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
|
||||
phba->sli4_hba.wq_ecount);
|
||||
|
||||
if (phba->fcp_embed_io)
|
||||
qdesc = lpfc_sli4_queue_alloc(phba,
|
||||
LPFC_EXPANDED_PAGE_SIZE,
|
||||
LPFC_WQE128_SIZE,
|
||||
LPFC_WQE_EXP_COUNT);
|
||||
else
|
||||
qdesc = lpfc_sli4_queue_alloc(phba,
|
||||
LPFC_DEFAULT_PAGE_SIZE,
|
||||
phba->sli4_hba.wq_esize,
|
||||
phba->sli4_hba.wq_ecount);
|
||||
if (!qdesc)
|
||||
goto out_error;
|
||||
|
||||
|
@ -390,6 +390,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                break;
        }

        ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
        ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
        ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
        ndlp->nlp_flag &= ~NLP_FIRSTBURST;

        /* Check for Nport to NPort pt2pt protocol */
        if ((vport->fc_flag & FC_PT2PT) &&
            !(vport->fc_flag & FC_PT2PT_PLOGI)) {

@ -727,6 +732,41 @@ out:
        return 0;
}

static uint32_t
lpfc_rcv_prli_support_check(struct lpfc_vport *vport,
                            struct lpfc_nodelist *ndlp,
                            struct lpfc_iocbq *cmdiocb)
{
        struct ls_rjt stat;
        uint32_t *payload;
        uint32_t cmd;

        payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
        cmd = *payload;
        if (vport->phba->nvmet_support) {
                /* Must be a NVME PRLI */
                if (cmd == ELS_CMD_PRLI)
                        goto out;
        } else {
                /* Initiator mode. */
                if (!vport->nvmei_support && (cmd == ELS_CMD_NVMEPRLI))
                        goto out;
        }
        return 1;
out:
        lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
                         "6115 Rcv PRLI (%x) check failed: ndlp rpi %d "
                         "state x%x flags x%x\n",
                         cmd, ndlp->nlp_rpi, ndlp->nlp_state,
                         ndlp->nlp_flag);
        memset(&stat, 0, sizeof(struct ls_rjt));
        stat.un.b.lsRjtRsnCode = LSRJT_CMD_UNSUPPORTED;
        stat.un.b.lsRjtRsnCodeExp = LSEXP_REQ_UNSUPPORTED;
        lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
                            ndlp, NULL);
        return 0;
}

static void
lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
              struct lpfc_iocbq *cmdiocb)

@ -742,9 +782,6 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        lp = (uint32_t *) pcmd->virt;
        npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));

        ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
        ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
        ndlp->nlp_flag &= ~NLP_FIRSTBURST;
        if ((npr->prliType == PRLI_FCP_TYPE) ||
            (npr->prliType == PRLI_NVME_TYPE)) {
                if (npr->initiatorFunc) {

@ -769,8 +806,12 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                 * type.  Target mode does not issue gft_id so doesn't get
                 * the fc4 type set until now.
                 */
                if ((phba->nvmet_support) && (npr->prliType == PRLI_NVME_TYPE))
                if (phba->nvmet_support && (npr->prliType == PRLI_NVME_TYPE)) {
                        ndlp->nlp_fc4_type |= NLP_FC4_NVME;
                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
                }
                if (npr->prliType == PRLI_FCP_TYPE)
                        ndlp->nlp_fc4_type |= NLP_FC4_FCP;
        }
        if (rport) {
                /* We need to update the rport role values */

@ -1373,7 +1414,8 @@ lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

        lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
        if (lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
                lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
        return ndlp->nlp_state;
}

@ -1544,6 +1586,9 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
        struct ls_rjt stat;

        if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) {
                return ndlp->nlp_state;
        }
        if (vport->phba->nvmet_support) {
                /* NVME Target mode.  Handle and respond to the PRLI and
                 * transition to UNMAPPED provided the RPI has completed

@ -1552,28 +1597,22 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
                if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
                        lpfc_rcv_prli(vport, ndlp, cmdiocb);
                        lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
                } else {
                        /* RPI registration has not completed. Reject the PRLI
                         * to prevent an illegal state transition when the
                         * rpi registration does complete.
                         */
                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
                                         "6115 NVMET ndlp rpi %d state "
                                         "unknown, state x%x flags x%08x\n",
                                         ndlp->nlp_rpi, ndlp->nlp_state,
                                         ndlp->nlp_flag);
                        memset(&stat, 0, sizeof(struct ls_rjt));
                        stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
                        stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
                        stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
                        stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
                        lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
                                            ndlp, NULL);
                        return ndlp->nlp_state;
                }
        } else {
                /* Initiator mode. */
                lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
        }

        return ndlp->nlp_state;
}

@ -1819,6 +1858,8 @@ lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

        if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
                return ndlp->nlp_state;
        lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
        return ndlp->nlp_state;
}

@ -1922,13 +1963,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                return ndlp->nlp_state;
        }

        /* Check out PRLI rsp */
        ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
        ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;

        /* NVME or FCP first burst must be negotiated for each PRLI. */
        ndlp->nlp_flag &= ~NLP_FIRSTBURST;
        ndlp->nvme_fb_size = 0;
        if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
            (npr->prliType == PRLI_FCP_TYPE)) {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,

@ -1945,8 +1979,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                if (npr->Retry)
                        ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;

                /* PRLI completed.  Decrement count. */
                ndlp->fc4_prli_sent--;
        } else if (nvpr &&
                   (bf_get_be32(prli_acc_rsp_code, nvpr) ==
                    PRLI_REQ_EXECUTED) &&

@ -1991,8 +2023,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                                 be32_to_cpu(nvpr->word5),
                                 ndlp->nlp_flag, ndlp->nlp_fcp_info,
                                 ndlp->nlp_type);
                /* PRLI completed.  Decrement count. */
                ndlp->fc4_prli_sent--;
        }
        if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
            (vport->port_type == LPFC_NPIV_PORT) &&

@ -2016,7 +2046,8 @@ out_err:
                ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
                if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
                else
                else if (ndlp->nlp_type &
                         (NLP_FCP_INITIATOR | NLP_NVME_INITIATOR))
                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
        } else
                lpfc_printf_vlog(vport,

@ -2241,6 +2272,9 @@ lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

        if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
                return ndlp->nlp_state;

        lpfc_rcv_prli(vport, ndlp, cmdiocb);
        lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
        return ndlp->nlp_state;

@ -2310,6 +2344,8 @@ lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

        if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
                return ndlp->nlp_state;
        lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
        return ndlp->nlp_state;
}

@ -57,11 +57,13 @@
/* NVME initiator-based functions */

static struct lpfc_nvme_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp);
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                  int expedite);

static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *);

static struct nvme_fc_port_template lpfc_nvme_template;

/**
 * lpfc_nvme_create_queue -

@ -88,6 +90,9 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
        struct lpfc_nvme_qhandle *qhandle;
        char *str;

        if (!pnvme_lport->private)
                return -ENOMEM;

        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        vport = lport->vport;
        qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);

@ -140,6 +145,9 @@ lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
        struct lpfc_nvme_lport *lport;
        struct lpfc_vport *vport;

        if (!pnvme_lport->private)
                return;

        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        vport = lport->vport;

@ -154,6 +162,10 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
        struct lpfc_nvme_lport *lport = localport->private;

        lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
                         "6173 localport %p delete complete\n",
                         lport);

        /* release any threads waiting for the unreg to complete */
        complete(&lport->lport_unreg_done);
}

@ -189,16 +201,19 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
         * calling state machine to remove the node.
         */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                        "6146 remoteport delete complete %p\n",
                        "6146 remoteport delete of remoteport %p\n",
                        remoteport);
        spin_lock_irq(&vport->phba->hbalock);
        ndlp->nrport = NULL;
        spin_unlock_irq(&vport->phba->hbalock);

        /* Remove original register reference. The host transport
         * won't reference this rport/remoteport any further.
         */
        lpfc_nlp_put(ndlp);

rport_err:
        /* This call has to execute as long as the rport is valid.
         * Release any threads waiting for the unreg to complete.
         */
        complete(&rport->rport_unreg_done);
        return;
}

static void

@ -206,6 +221,7 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                       struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_vport *vport = cmdwqe->vport;
        struct lpfc_nvme_lport *lport;
        uint32_t status;
        struct nvmefc_ls_req *pnvme_lsreq;
        struct lpfc_dmabuf *buf_ptr;

@ -215,6 +231,13 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,

        pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
        status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
        if (status) {
                lport = (struct lpfc_nvme_lport *)vport->localport->private;
                if (bf_get(lpfc_wcqe_c_xb, wcqe))
                        atomic_inc(&lport->cmpl_ls_xb);
                atomic_inc(&lport->cmpl_ls_err);
        }

        ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                         "6047 nvme cmpl Enter "

@ -416,6 +439,9 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        vport = lport->vport;

        if (vport->load_flag & FC_UNLOADING)
                return -ENODEV;

        if (vport->load_flag & FC_UNLOADING)
                return -ENODEV;

@ -490,6 +516,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
                                pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
                                ndlp, 2, 30, 0);
        if (ret != WQE_SUCCESS) {
                atomic_inc(&lport->xmt_ls_err);
                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                                 "6052 EXIT. issue ls wqe failed lport %p, "
                                 "rport %p lsreq%p Status %x DID %x\n",

@ -534,6 +561,9 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
        vport = lport->vport;
        phba = vport->phba;

        if (vport->load_flag & FC_UNLOADING)
                return;

        ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
        if (!ndlp) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,

@ -571,6 +601,7 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,

        /* Abort the targeted IOs and remove them from the abort list. */
        list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
                atomic_inc(&lport->xmt_ls_abort);
                spin_lock_irq(&phba->hbalock);
                list_del_init(&wqe->dlist);
                lpfc_sli_issue_abort_iotag(phba, pring, wqe);

@ -774,8 +805,9 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
        struct lpfc_nvme_rport *rport;
        struct lpfc_nodelist *ndlp;
        struct lpfc_nvme_fcpreq_priv *freqpriv;
        struct lpfc_nvme_lport *lport;
        unsigned long flags;
        uint32_t code;
        uint32_t code, status;
        uint16_t cid, sqhd, data;
        uint32_t *ptr;

@ -790,10 +822,17 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,

        nCmd = lpfc_ncmd->nvmeCmd;
        rport = lpfc_ncmd->nrport;
        status = bf_get(lpfc_wcqe_c_status, wcqe);
        if (status) {
                lport = (struct lpfc_nvme_lport *)vport->localport->private;
                if (bf_get(lpfc_wcqe_c_xb, wcqe))
                        atomic_inc(&lport->cmpl_fcp_xb);
                atomic_inc(&lport->cmpl_fcp_err);
        }

        lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
                         lpfc_ncmd->cur_iocbq.sli4_xritag,
                         bf_get(lpfc_wcqe_c_status, wcqe), wcqe->parameter);
                         status, wcqe->parameter);
        /*
         * Catch race where our node has transitioned, but the
         * transport is still transitioning.

@ -851,8 +890,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
                        nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
                        nCmd->transferred_length = nCmd->payload_length;
                } else {
                        lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) &
                                    LPFC_IOCB_STATUS_MASK);
                        lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
                        lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);

                        /* For NVME, the only failure path that results in an

@ -946,10 +984,13 @@ out_err:
        freqpriv->nvme_buf = NULL;

        /* NVME targets need completion held off until the abort exchange
         * completes.
         * completes unless the NVME Rport is getting unregistered.
         */
        if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY))

        if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
                nCmd->done(nCmd);
                lpfc_ncmd->nvmeCmd = NULL;
        }

        spin_lock_irqsave(&phba->hbalock, flags);
        lpfc_ncmd->nrport = NULL;

@ -1149,7 +1190,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,

                first_data_sgl = sgl;
                lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
                if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt + 1) {
                if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                        "6058 Too many sg segments from "
                                        "NVME Transport. Max %d, "

@ -1239,6 +1280,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
                        struct nvmefc_fcp_req *pnvme_fcreq)
{
        int ret = 0;
        int expedite = 0;
        struct lpfc_nvme_lport *lport;
        struct lpfc_vport *vport;
        struct lpfc_hba *phba;

@ -1246,13 +1288,30 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
        struct lpfc_nvme_buf *lpfc_ncmd;
        struct lpfc_nvme_rport *rport;
        struct lpfc_nvme_qhandle *lpfc_queue_info;
        struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
        struct lpfc_nvme_fcpreq_priv *freqpriv;
        struct nvme_common_command *sqe;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        uint64_t start = 0;
#endif

        /* Validate pointers. LLDD fault handling with transport does
         * have timing races.
         */
        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        if (unlikely(!lport)) {
                ret = -EINVAL;
                goto out_fail;
        }

        vport = lport->vport;

        if (unlikely(!hw_queue_handle)) {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
                                 "6129 Fail Abort, NULL hw_queue_handle\n");
                ret = -EINVAL;
                goto out_fail;
        }

        phba = vport->phba;

        if (vport->load_flag & FC_UNLOADING) {

@ -1260,16 +1319,17 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
                goto out_fail;
        }

        /* Validate pointers. */
        if (!pnvme_lport || !pnvme_rport || !freqpriv) {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR | LOG_NODE,
                                 "6117 No Send:IO submit ptrs NULL, lport %p, "
                                 "rport %p fcreq_priv %p\n",
                                 pnvme_lport, pnvme_rport, freqpriv);
        if (vport->load_flag & FC_UNLOADING) {
                ret = -ENODEV;
                goto out_fail;
        }

        freqpriv = pnvme_fcreq->private;
        if (unlikely(!freqpriv)) {
                ret = -EINVAL;
                goto out_fail;
        }

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (phba->ktime_on)
                start = ktime_get_ns();

@ -1293,6 +1353,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
                lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
                                 "6066 Missing node for DID %x\n",
                                 pnvme_rport->port_id);
                atomic_inc(&lport->xmt_fcp_bad_ndlp);
                ret = -ENODEV;
                goto out_fail;
        }

@ -1306,21 +1367,36 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
                                 "IO. State x%x, Type x%x\n",
                                 rport, pnvme_rport->port_id,
                                 ndlp->nlp_state, ndlp->nlp_type);
                atomic_inc(&lport->xmt_fcp_bad_ndlp);
                ret = -ENODEV;
                goto out_fail;

        }

        /* Currently only NVME Keep alive commands should be expedited
         * if the driver runs out of a resource. These should only be
         * issued on the admin queue, qidx 0
         */
        if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
                sqe = &((struct nvme_fc_cmd_iu *)
                        pnvme_fcreq->cmdaddr)->sqe.common;
                if (sqe->opcode == nvme_admin_keep_alive)
                        expedite = 1;
        }

        /* The node is shared with FCP IO, make sure the IO pending count does
         * not exceed the programmed depth.
         */
        if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
        if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
            !expedite) {
                atomic_inc(&lport->xmt_fcp_qdepth);
                ret = -EBUSY;
                goto out_fail;
        }

        lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp);
        lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite);
        if (lpfc_ncmd == NULL) {
                atomic_inc(&lport->xmt_fcp_noxri);
                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
                                 "6065 driver's buffer pool is empty, "
                                 "IO failed\n");

@ -1373,6 +1449,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,

        ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
        if (ret) {
                atomic_inc(&lport->xmt_fcp_wqerr);
                atomic_dec(&ndlp->cmd_pending);
                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
                                 "6113 FCP could not issue WQE err %x "

@ -1473,19 +1550,36 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
        struct lpfc_nvme_lport *lport;
        struct lpfc_vport *vport;
        struct lpfc_hba *phba;
        struct lpfc_nvme_rport *rport;
        struct lpfc_nvme_buf *lpfc_nbuf;
        struct lpfc_iocbq *abts_buf;
        struct lpfc_iocbq *nvmereq_wqe;
        struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
        struct lpfc_nvme_fcpreq_priv *freqpriv;
        union lpfc_wqe *abts_wqe;
        unsigned long flags;
        int ret_val;

        /* Validate pointers. LLDD fault handling with transport does
         * have timing races.
         */
        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
        if (unlikely(!lport))
                return;

        vport = lport->vport;

        if (unlikely(!hw_queue_handle)) {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
                                 "6129 Fail Abort, HW Queue Handle NULL.\n");
                return;
        }

        phba = vport->phba;
        freqpriv = pnvme_fcreq->private;

        if (unlikely(!freqpriv))
                return;
        if (vport->load_flag & FC_UNLOADING)
                return;

        /* Announce entry to new IO submit field. */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,

@ -1552,6 +1646,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
                return;
        }

        atomic_inc(&lport->xmt_fcp_abort);
        lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
                         nvmereq_wqe->sli4_xritag,
                         nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);

@ -1931,6 +2026,8 @@ lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba)
        spin_lock(&phba->nvme_buf_list_put_lock);
        list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist);
        list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist);
        phba->get_nvme_bufs = 0;
        phba->put_nvme_bufs = 0;
        spin_unlock(&phba->nvme_buf_list_put_lock);
        spin_unlock_irq(&phba->nvme_buf_list_get_lock);

@ -2067,6 +2164,20 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
        return num_posted;
}

static inline struct lpfc_nvme_buf *
lpfc_nvme_buf(struct lpfc_hba *phba)
{
        struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;

        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                 &phba->lpfc_nvme_buf_list_get, list) {
                list_del_init(&lpfc_ncmd->list);
                phba->get_nvme_bufs--;
                return lpfc_ncmd;
        }
        return NULL;
}

/**
 * lpfc_get_nvme_buf - Get a nvme buffer from lpfc_nvme_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.

@ -2079,35 +2190,27 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
 *   Pointer to lpfc_nvme_buf - Success
 **/
static struct lpfc_nvme_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                  int expedite)
{
        struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
        struct lpfc_nvme_buf *lpfc_ncmd = NULL;
        unsigned long iflag = 0;
        int found = 0;

        spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                 &phba->lpfc_nvme_buf_list_get, list) {
                list_del_init(&lpfc_ncmd->list);
                found = 1;
                break;
        }
        if (!found) {
        if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
                lpfc_ncmd = lpfc_nvme_buf(phba);
        if (!lpfc_ncmd) {
                spin_lock(&phba->nvme_buf_list_put_lock);
                list_splice(&phba->lpfc_nvme_buf_list_put,
                            &phba->lpfc_nvme_buf_list_get);
                phba->get_nvme_bufs += phba->put_nvme_bufs;
                INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
                phba->put_nvme_bufs = 0;
                spin_unlock(&phba->nvme_buf_list_put_lock);
                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                         &phba->lpfc_nvme_buf_list_get, list) {
                        list_del_init(&lpfc_ncmd->list);
                        found = 1;
                        break;
                }
                if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
                        lpfc_ncmd = lpfc_nvme_buf(phba);
        }
        spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
        if (!found)
                return NULL;
        return lpfc_ncmd;
}

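With the get/put counters added above, lpfc_get_nvme_buf() can now hold back LPFC_NVME_EXPEDITE_XRICNT buffers: ordinary IOs are refused once the get list falls to the watermark, while expedited requests (NVME keep-alives on the admin queue) may still dip into the reserve. A minimal, self-contained sketch of that reservation policy (single flat pool for illustration, not the driver's two-list scheme):

#include <stddef.h>

#define POOL_SIZE       64
#define RESERVE_CNT     8       /* stand-in for LPFC_NVME_EXPEDITE_XRICNT */

struct buf {
        int in_use;
};

static struct buf pool[POOL_SIZE];
static int avail = POOL_SIZE;

/* Hand out a buffer; only expedited callers may consume the reserve. */
static struct buf *pool_get(int expedite)
{
        int i;

        if (avail <= RESERVE_CNT && !expedite)
                return NULL;    /* reserve kept for keep-alive commands */
        for (i = 0; i < POOL_SIZE; i++) {
                if (!pool[i].in_use) {
                        pool[i].in_use = 1;
                        avail--;
                        return &pool[i];
                }
        }
        return NULL;
}

static void pool_put(struct buf *b)
{
        b->in_use = 0;
        avail++;
}
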
@ -2145,6 +2248,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
                lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME;
                spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
                list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put);
                phba->put_nvme_bufs++;
                spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
        }
}

@ -2221,6 +2325,18 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
        lport->vport = vport;
        vport->nvmei_support = 1;

        atomic_set(&lport->xmt_fcp_noxri, 0);
        atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
        atomic_set(&lport->xmt_fcp_qdepth, 0);
        atomic_set(&lport->xmt_fcp_wqerr, 0);
        atomic_set(&lport->xmt_fcp_abort, 0);
        atomic_set(&lport->xmt_ls_abort, 0);
        atomic_set(&lport->xmt_ls_err, 0);
        atomic_set(&lport->cmpl_fcp_xb, 0);
        atomic_set(&lport->cmpl_fcp_err, 0);
        atomic_set(&lport->cmpl_ls_xb, 0);
        atomic_set(&lport->cmpl_ls_err, 0);

        /* Don't post more new bufs if repost already recovered
         * the nvme sgls.
         */

@ -2234,6 +2350,47 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
        return ret;
}

/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
 *
 * The driver has to wait for the host nvme transport to callback
 * indicating the localport has successfully unregistered all
 * resources.  Since this is an uninterruptible wait, loop every ten
 * seconds and print a message indicating no progress.
 *
 * An uninterruptible wait is used because of the risk of transport-to-
 * driver state mismatch.
 */
void
lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
                           struct lpfc_nvme_lport *lport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
        u32 wait_tmo;
        int ret;

        /* Host transport has to clean up and confirm requiring an indefinite
         * wait. Print a message if a 10 second wait expires and renew the
         * wait. This is unexpected.
         */
        wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
        while (true) {
                ret = wait_for_completion_timeout(&lport->lport_unreg_done,
                                                  wait_tmo);
                if (unlikely(!ret)) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
                                         "6176 Lport %p Localport %p wait "
                                         "timed out. Renewing.\n",
                                         lport, vport->localport);
                        continue;
                }
                break;
        }
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
                         "6177 Lport %p Localport %p Complete Success\n",
                         lport, vport->localport);
#endif
}

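lpfc_nvme_lport_unreg_wait() above waits indefinitely for the transport's unregister completion, re-arming a bounded timeout only so a lack of progress gets logged. The same renewing-wait shape can be sketched outside the kernel with POSIX threads (illustrative only, not kernel code):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int unreg_done;          /* set by the completion path */

/* Block until completion, logging every time a 10 s window lapses. */
static void wait_for_unreg(void)
{
        struct timespec deadline;

        pthread_mutex_lock(&lock);
        while (!unreg_done) {
                clock_gettime(CLOCK_REALTIME, &deadline);
                deadline.tv_sec += 10;
                if (pthread_cond_timedwait(&cond, &lock,
                                           &deadline) == ETIMEDOUT)
                        fprintf(stderr, "unreg wait timed out, renewing\n");
        }
        pthread_mutex_unlock(&lock);
}

/* Completion side: mark done and wake the waiter. */
static void unreg_complete(void)
{
        pthread_mutex_lock(&lock);
        unreg_done = 1;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
}
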
/**
 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
 * @pnvme: pointer to lpfc nvme data structure.

@ -2268,7 +2425,11 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
         */
        init_completion(&lport->lport_unreg_done);
        ret = nvme_fc_unregister_localport(localport);
        wait_for_completion_timeout(&lport->lport_unreg_done, 5);

        /* Wait for completion.  This either blocks
         * indefinitely or succeeds
         */
        lpfc_nvme_lport_unreg_wait(vport, lport);

        /* Regardless of the unregister upcall response, clear
         * nvmei_support.  All rports are unregistered and the

@ -2365,6 +2526,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)

        rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
        rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
        if (!ndlp->nrport)
                lpfc_nlp_get(ndlp);

        ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
        if (!ret) {
                /* If the ndlp already has an nrport, this is just

@ -2373,23 +2537,33 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                 */
                rport = remote_port->private;
                if (ndlp->nrport) {
                        lpfc_printf_vlog(ndlp->vport, KERN_INFO,
                                         LOG_NVME_DISC,
                                         "6014 Rebinding lport to "
                                         "rport wwpn 0x%llx, "
                                         "Data: x%x x%x x%x x%06x\n",
                                         remote_port->port_name,
                                         remote_port->port_id,
                                         remote_port->port_role,
                                         ndlp->nlp_type,
                                         ndlp->nlp_DID);
                        if (ndlp->nrport == remote_port->private) {
                                /* Same remoteport.  Just reuse. */
                                lpfc_printf_vlog(ndlp->vport, KERN_INFO,
                                                 LOG_NVME_DISC,
                                                 "6014 Rebinding lport to "
                                                 "remoteport %p wwpn 0x%llx, "
                                                 "Data: x%x x%x %p x%x x%06x\n",
                                                 remote_port,
                                                 remote_port->port_name,
                                                 remote_port->port_id,
                                                 remote_port->port_role,
                                                 ndlp,
                                                 ndlp->nlp_type,
                                                 ndlp->nlp_DID);
                                return 0;
                        }
                        prev_ndlp = rport->ndlp;

                        /* Sever the ndlp<->rport connection before dropping
                         * the ndlp ref from register.
                        /* Sever the ndlp<->rport association
                         * before dropping the ndlp ref from
                         * register.
                         */
                        spin_lock_irq(&vport->phba->hbalock);
                        ndlp->nrport = NULL;
                        spin_unlock_irq(&vport->phba->hbalock);
                        rport->ndlp = NULL;
                        rport->remoteport = NULL;
                        if (prev_ndlp)
                                lpfc_nlp_put(ndlp);
                }

@ -2397,19 +2571,20 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                /* Clean bind the rport to the ndlp. */
                rport->remoteport = remote_port;
                rport->lport = lport;
                rport->ndlp = lpfc_nlp_get(ndlp);
                if (!rport->ndlp)
                        return -1;
                rport->ndlp = ndlp;
                spin_lock_irq(&vport->phba->hbalock);
                ndlp->nrport = rport;
                spin_unlock_irq(&vport->phba->hbalock);
                lpfc_printf_vlog(vport, KERN_INFO,
                                 LOG_NVME_DISC | LOG_NODE,
                                 "6022 Binding new rport to "
                                 "lport %p Rport WWNN 0x%llx, "
                                 "lport %p Remoteport %p WWNN 0x%llx, "
                                 "Rport WWPN 0x%llx DID "
                                 "x%06x Role x%x\n",
                                 lport,
                                 "x%06x Role x%x, ndlp %p\n",
                                 lport, remote_port,
                                 rpinfo.node_name, rpinfo.port_name,
                                 rpinfo.port_id, rpinfo.port_role);
                                 rpinfo.port_id, rpinfo.port_role,
                                 ndlp);
        } else {
                lpfc_printf_vlog(vport, KERN_ERR,
                                 LOG_NVME_DISC | LOG_NODE,

@ -2473,20 +2648,20 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        /* Sanity check ndlp type.  Only call for NVME ports. Don't
         * clear any rport state until the transport calls back.
         */
        if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
                init_completion(&rport->rport_unreg_done);

        if (ndlp->nlp_type & NLP_NVME_TARGET) {
                /* No concern about the role change on the nvme remoteport.
                 * The transport will update it.
                 */
                ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;
                ret = nvme_fc_unregister_remoteport(remoteport);
                if (ret != 0) {
                        lpfc_nlp_put(ndlp);
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
                                         "6167 NVME unregister failed %d "
                                         "port_state x%x\n",
                                         ret, remoteport->port_state);
                }

        }
        return;

@ -2545,8 +2720,11 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
                         * before the abort exchange command fully completes.
                         * Once completed, it is available via the put list.
                         */
                        nvme_cmd = lpfc_ncmd->nvmeCmd;
                        nvme_cmd->done(nvme_cmd);
                        if (lpfc_ncmd->nvmeCmd) {
                                nvme_cmd = lpfc_ncmd->nvmeCmd;
                                nvme_cmd->done(nvme_cmd);
                                lpfc_ncmd->nvmeCmd = NULL;
                        }
                        lpfc_release_nvme_buf(phba, lpfc_ncmd);
                        return;
                }

@ -2558,3 +2736,45 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
                        "6312 XRI Aborted xri x%x not found\n", xri);

}

/**
 * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
 * @phba: Pointer to HBA context object.
 *
 * This function flushes all wqes in the nvme rings and frees all resources
 * in the txcmplq. This function does not issue abort wqes for the IO
 * commands in txcmplq, they will just be returned with
 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
 * slot has been permanently disabled.
 **/
void
lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
{
        struct lpfc_sli_ring *pring;
        u32 i, wait_cnt = 0;

        if (phba->sli_rev < LPFC_SLI_REV4)
                return;

        /* Cycle through all NVME rings and make sure all outstanding
         * WQEs have been removed from the txcmplqs.
         */
        for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
                pring = phba->sli4_hba.nvme_wq[i]->pring;

                /* Retrieve everything on the txcmplq */
                while (!list_empty(&pring->txcmplq)) {
                        msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
wait_cnt++;
|
||||
|
||||
/* The sleep is 10mS. Every ten seconds,
|
||||
* dump a message. Something is wrong.
|
||||
*/
|
||||
if ((wait_cnt % 1000) == 0) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
||||
"6178 NVME IO not empty, "
|
||||
"cnt %d\n", wait_cnt);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
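
lpfc_nvme_wait_for_io_drain() polls the txcmplq rather than blocking on an event, sleeping 10 ms per pass and warning once per ~1000 passes. A toy userspace model of that poll-and-periodically-log loop; the outstanding count simply decays here so the example terminates:

    #include <stdio.h>

    int main(void)
    {
        unsigned int outstanding = 2500;    /* pretend txcmplq depth */
        unsigned int wait_cnt = 0;

        while (outstanding) {
            /* the driver sleeps 10 ms per pass (msleep); elided here */
            outstanding--;
            if ((++wait_cnt % 1000) == 0)   /* roughly every 10 seconds */
                fprintf(stderr, "IO not empty, cnt %u\n", wait_cnt);
        }
        printf("drained after %u passes\n", wait_cnt);
        return 0;
    }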

@ -22,10 +22,12 @@
 ********************************************************************/

#define LPFC_NVME_DEFAULT_SEGS		(64 + 1)	/* 256K IOs */
#define LPFC_NVME_WQSIZE		256

#define LPFC_NVME_ERSP_LEN		0x20

#define LPFC_NVME_WAIT_TMO		10
#define LPFC_NVME_EXPEDITE_XRICNT	8

struct lpfc_nvme_qhandle {
	uint32_t index;		/* WQ index to use */
	uint32_t qidx;		/* queue index passed to create */
@ -36,7 +38,18 @@ struct lpfc_nvme_qhandle {
struct lpfc_nvme_lport {
	struct lpfc_vport *vport;
	struct completion lport_unreg_done;
	/* Add sttats counters here */
	/* Add stats counters here */
	atomic_t xmt_fcp_noxri;
	atomic_t xmt_fcp_bad_ndlp;
	atomic_t xmt_fcp_qdepth;
	atomic_t xmt_fcp_wqerr;
	atomic_t xmt_fcp_abort;
	atomic_t xmt_ls_abort;
	atomic_t xmt_ls_err;
	atomic_t cmpl_fcp_xb;
	atomic_t cmpl_fcp_err;
	atomic_t cmpl_ls_xb;
	atomic_t cmpl_ls_err;
};

struct lpfc_nvme_rport {

@ -38,6 +38,7 @@

#include <../drivers/nvme/host/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
@ -126,10 +127,17 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (status)
		atomic_inc(&tgtp->xmt_ls_rsp_error);
	else
		atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
	if (tgtp) {
		if (status) {
			atomic_inc(&tgtp->xmt_ls_rsp_error);
			if (status == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_ls_rsp_aborted);
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
		} else {
			atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
		}
	}

out:
	rsp = &ctxp->ctx.ls_req;
@ -218,6 +226,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
	ctxp->entry_cnt = 1;
	ctxp->flag = 0;
	ctxp->ctxbuf = ctx_buf;
	ctxp->rqb_buffer = (void *)nvmebuf;
	spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@ -253,6 +262,17 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
		return;
	}

	/* Processing of FCP command is deferred */
	if (rc == -EOVERFLOW) {
		lpfc_nvmeio_data(phba,
				 "NVMET RCV BUSY: xri x%x sz %d "
				 "from %06x\n",
				 oxid, size, sid);
		/* defer repost rcv buffer till .defer_rcv callback */
		ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		return;
	}
	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
@ -519,8 +539,11 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
	if (status) {
		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
		rsp->transferred_length = 0;
		if (tgtp)
		if (tgtp) {
			atomic_inc(&tgtp->xmt_fcp_rsp_error);
			if (status == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
		}

		logerr = LOG_NVME_IOERR;

@ -528,6 +551,8 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
			ctxp->flag |= LPFC_NVMET_XBUSY;
			logerr |= LOG_NVME_ABTS;
			if (tgtp)
				atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);

		} else {
			ctxp->flag &= ~LPFC_NVMET_XBUSY;
@ -632,6 +657,9 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
	struct ulp_bde64 bpl;
	int rc;

	if (phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	if (phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

@ -721,6 +749,11 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
		goto aerr;
	}

	if (phba->pport->load_flag & FC_UNLOADING) {
		rc = -ENODEV;
		goto aerr;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (ctxp->ts_cmd_nvme) {
		if (rsp->op == NVMET_FCOP_RSP)
@ -820,6 +853,9 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long flags;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

@ -910,7 +946,11 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,

	tgtp = phba->targetport->private;
	atomic_inc(&tgtp->rcv_fcp_cmd_defer);
	lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
	if (ctxp->flag & LPFC_NVMET_DEFER_RCV_REPOST)
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
	else
		nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
	ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
}
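
The defer_rcv change keys the buffer-release strategy off the new LPFC_NVMET_DEFER_RCV_REPOST flag: repost to the receive queue if the flag is set, otherwise hand the buffer back to the free pool, clearing the flag either way. A compact model of that handshake (illustrative only, names invented):

    #include <stdio.h>

    #define DEFER_RCV_REPOST 0x20    /* models LPFC_NVMET_DEFER_RCV_REPOST */

    static unsigned int ctx_flag;

    static void defer_rcv_done(void)
    {
        if (ctx_flag & DEFER_RCV_REPOST)
            printf("repost buffer to the receive queue\n");
        else
            printf("return buffer to the free pool\n");
        ctx_flag &= ~DEFER_RCV_REPOST;    /* always consumed */
    }

    int main(void)
    {
        ctx_flag |= DEFER_RCV_REPOST;    /* set on the deferred-receive path */
        defer_rcv_done();                /* reposts */
        defer_rcv_done();                /* flag clear: frees instead */
        return 0;
    }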

static struct nvmet_fc_target_template lpfc_tgttemplate = {
@ -1216,6 +1256,8 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
		atomic_set(&tgtp->xmt_ls_rsp, 0);
		atomic_set(&tgtp->xmt_ls_drop, 0);
		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
		atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
		atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
@ -1228,7 +1270,10 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
		atomic_set(&tgtp->xmt_fcp_release, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
		atomic_set(&tgtp->xmt_fcp_abort, 0);
		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_abort_unsol, 0);
@ -1270,6 +1315,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_nodelist *ndlp;
	unsigned long iflag = 0;
	int rrq_empty = 0;
@ -1280,6 +1326,12 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;

	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
	}

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
@ -1682,6 +1734,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
	ctxp->entry_cnt = 1;
	ctxp->flag = 0;
	ctxp->ctxbuf = ctx_buf;
	ctxp->rqb_buffer = (void *)nvmebuf;
	spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@ -1715,6 +1768,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,

	/* Process FCP command */
	if (rc == 0) {
		ctxp->rqb_buffer = NULL;
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
		return;
@ -1726,10 +1780,11 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
			 "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
			 oxid, size, sid);
		/* defer reposting rcv buffer till .defer_rcv callback */
		ctxp->rqb_buffer = nvmebuf;
		ctxp->flag |= LPFC_NVMET_DEFER_RCV_REPOST;
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		return;
	}
	ctxp->rqb_buffer = nvmebuf;

	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@ -1992,7 +2047,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
		return NULL;
	}

	if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
	if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6109 NVMET prep FCP wqe: seg cnt err: "
				"NPORT x%x oxid x%x ste %d cnt %d\n",

@ -25,6 +25,10 @@
#define LPFC_NVMET_RQE_DEF_COUNT	512
#define LPFC_NVMET_SUCCESS_LEN		12

#define LPFC_NVMET_MRQ_OFF		0xffff
#define LPFC_NVMET_MRQ_AUTO		0
#define LPFC_NVMET_MRQ_MAX		16

/* Used for NVME Target */
struct lpfc_nvmet_tgtport {
	struct lpfc_hba *phba;
@ -43,6 +47,8 @@ struct lpfc_nvmet_tgtport {

	/* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
	atomic_t xmt_ls_rsp_error;
	atomic_t xmt_ls_rsp_aborted;
	atomic_t xmt_ls_rsp_xb_set;
	atomic_t xmt_ls_rsp_cmpl;

	/* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
@ -60,12 +66,15 @@ struct lpfc_nvmet_tgtport {
	atomic_t xmt_fcp_rsp;

	/* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
	atomic_t xmt_fcp_rsp_xb_set;
	atomic_t xmt_fcp_rsp_cmpl;
	atomic_t xmt_fcp_rsp_error;
	atomic_t xmt_fcp_rsp_aborted;
	atomic_t xmt_fcp_rsp_drop;


	/* Stats counters - lpfc_nvmet_xmt_fcp_abort */
	atomic_t xmt_fcp_xri_abort_cqe;
	atomic_t xmt_fcp_abort;
	atomic_t xmt_fcp_abort_cmpl;
	atomic_t xmt_abort_sol;
@ -122,6 +131,7 @@ struct lpfc_nvmet_rcv_ctx {
#define LPFC_NVMET_XBUSY		0x4  /* XB bit set on IO cmpl */
#define LPFC_NVMET_CTX_RLS		0x8  /* ctx free requested */
#define LPFC_NVMET_ABTS_RCV		0x10 /* ABTS received on exchange */
#define LPFC_NVMET_DEFER_RCV_REPOST	0x20 /* repost to RQ on defer rcv */
	struct rqb_dmabuf *rqb_buffer;
	struct lpfc_nvmet_ctxbuf *ctxbuf;

@ -475,28 +475,30 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int put_index;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	put_index = hq->host_index;
	temp_hrqe = hq->qe[put_index].rqe;
	temp_drqe = dq->qe[dq->host_index].rqe;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = hq->qe[hq_put_index].rqe;
	temp_drqe = dq->qe[dq_put_index].rqe;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (put_index != dq->host_index)
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((put_index + 1) % hq->entry_count) == hq->hba_index)
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((put_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
@ -517,7 +519,7 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return put_index;
	return hq_put_index;
}
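
The rewritten lpfc_sli4_rq_put() names both put indexes and keeps the classic one-slot-open ring test: the queue is full when advancing the host (put) index would land on hba_index, and the paired header/data queues must be in step. A self-contained sketch of just that index arithmetic (not the driver code):

    #include <stdio.h>

    struct ring {
        unsigned int host_index;    /* next put slot (host side) */
        unsigned int hba_index;     /* next slot the hardware will free */
        unsigned int entry_count;
    };

    /* Paired queues must stay in step; the ring is full when advancing
     * the put index would land on hba_index (one slot kept open). */
    static int ring_put(struct ring *hq, struct ring *dq)
    {
        unsigned int slot = hq->host_index;

        if (hq->host_index != dq->host_index)
            return -1;                                /* -EINVAL */
        if (((slot + 1) % hq->entry_count) == hq->hba_index)
            return -2;                                /* -EBUSY */
        hq->host_index = (slot + 1) % hq->entry_count;
        dq->host_index = (slot + 1) % dq->entry_count;
        return (int)slot;    /* the slot just consumed */
    }

    int main(void)
    {
        struct ring hq = { 0, 2, 4 }, dq = { 0, 2, 4 };
        int first = ring_put(&hq, &dq);
        int second = ring_put(&hq, &dq);

        printf("%d %d\n", first, second);    /* "0 -2": second put is full */
        return 0;
    }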

/**
@ -12317,41 +12319,6 @@ void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
	}
}

/**
 * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 NVME abort XRI events.
 **/
void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the fcp xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the fcp xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for NVME work queue */
		if (phba->nvmet_support) {
			lpfc_sli4_nvmet_xri_aborted(phba,
						    &cq_event->cqe.wcqe_axri);
		} else {
			lpfc_sli4_nvme_xri_aborted(phba,
						   &cq_event->cqe.wcqe_axri);
		}
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
 * @phba: pointer to lpfc hba data structure.
@ -12548,6 +12515,24 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
	return irspiocbq;
}

inline struct lpfc_cq_event *
lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
{
	struct lpfc_cq_event *cq_event;

	/* Allocate a new internal CQ_EVENT entry */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0602 Failed to alloc CQ_EVENT entry\n");
		return NULL;
	}

	/* Move the CQE into the event */
	memcpy(&cq_event->cqe, entry, size);
	return cq_event;
}
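
lpfc_cq_event_setup() factors the allocate-then-snapshot sequence out of each completion handler, leaving callers with a single NULL check. A generic userspace analogue of the same shape (hypothetical types and sizes):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct cq_event {
        unsigned char cqe[64];    /* big enough for any entry used here */
    };

    /* Allocate the wrapper and snapshot the entry in one place, so the
     * callers are left with a single NULL check. */
    static struct cq_event *cq_event_setup(const void *entry, size_t size)
    {
        struct cq_event *ev;

        if (size > sizeof(ev->cqe))
            return NULL;
        ev = malloc(sizeof(*ev));
        if (!ev)
            return NULL;
        memcpy(ev->cqe, entry, size);
        return ev;
    }

    int main(void)
    {
        unsigned int wcqe[4] = { 1, 2, 3, 4 };    /* fake completion entry */
        struct cq_event *ev = cq_event_setup(wcqe, sizeof(wcqe));

        if (!ev)
            return 1;
        printf("first word: %u\n", *(unsigned int *)(void *)ev->cqe);
        free(ev);
        return 0;
    }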

/**
 * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event
 * @phba: Pointer to HBA context object.
@ -12569,16 +12554,9 @@ lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
			"word2:x%x, word3:x%x\n", mcqe->word0,
			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);

	/* Allocate a new internal CQ_EVENT entry */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0394 Failed to allocate CQ_EVENT entry\n");
	cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
	if (!cq_event)
		return false;
	}

	/* Move the CQE into an asynchronous event entry */
	memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
	/* Set the async event flag */
@ -12824,18 +12802,12 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* Allocate a new internal CQ_EVENT entry */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0602 Failed to allocate CQ_EVENT entry\n");
		return false;
	}

	/* Move the CQE into the proper xri abort event list */
	memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
	switch (cq->subtype) {
	case LPFC_FCP:
		cq_event = lpfc_cq_event_setup(
			phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
		if (!cq_event)
			return false;
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
@ -12844,7 +12816,12 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case LPFC_NVME_LS: /* NVME LS uses ELS resources */
	case LPFC_ELS:
		cq_event = lpfc_cq_event_setup(
			phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
		if (!cq_event)
			return false;
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
@ -12854,13 +12831,13 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
		workposted = true;
		break;
	case LPFC_NVME:
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
		/* Set the nvme xri abort event flag */
		phba->hba_flag |= NVME_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		/* Notify aborted XRI for NVME work queue */
		if (phba->nvmet_support)
			lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
		else
			lpfc_sli4_nvme_xri_aborted(phba, wcqe);

		workposted = false;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@ -12868,7 +12845,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
				"%08x %08x %08x %08x\n",
				cq->subtype, wcqe->word0, wcqe->parameter,
				wcqe->word2, wcqe->word3);
		lpfc_sli4_cq_event_release(phba, cq_event);
		workposted = false;
		break;
	}
@ -12913,8 +12889,8 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2537 Receive Frame Truncated!!\n");
	case FC_STATUS_RQ_SUCCESS:
		lpfc_sli4_rq_release(hrq, drq);
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
@ -13316,8 +13292,8 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
				"6126 Receive Frame Truncated!!\n");
		/* Drop thru */
	case FC_STATUS_RQ_SUCCESS:
		lpfc_sli4_rq_release(hrq, drq);
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
@ -13919,7 +13895,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
	while (!list_empty(&queue->page_list)) {
		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
				 list);
		dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
		dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
@ -13938,6 +13914,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
/**
 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
 * @phba: The HBA that this queue is being created on.
 * @page_size: The size of a queue page
 * @entry_size: The size of each queue entry for this queue.
 * @entry count: The number of entries that this queue will handle.
 *
@ -13946,8 +13923,8 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
 * queue on the HBA.
 **/
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
		      uint32_t entry_count)
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
		      uint32_t entry_size, uint32_t entry_count)
{
	struct lpfc_queue *queue;
	struct lpfc_dmabuf *dmabuf;
@ -13956,7 +13933,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;
		hw_page_size = page_size;

	queue = kzalloc(sizeof(struct lpfc_queue) +
			(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
@ -13973,6 +13950,15 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
	INIT_LIST_HEAD(&queue->wq_list);
	INIT_LIST_HEAD(&queue->page_list);
	INIT_LIST_HEAD(&queue->child_list);

	/* Set queue parameters now.  If the system cannot provide memory
	 * resources, the free routine needs to know what was allocated.
	 */
	queue->entry_size = entry_size;
	queue->entry_count = entry_count;
	queue->page_size = hw_page_size;
	queue->phba = phba;

	for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!dmabuf)
@ -13994,9 +13980,6 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
			queue->qe[total_qe_count].address = dma_pointer;
		}
	}
	queue->entry_size = entry_size;
	queue->entry_count = entry_count;
	queue->phba = phba;
	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
	INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
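
The relocation above is the point of the hunk: entry_size, entry_count, page_size and phba are recorded before any page is allocated, so lpfc_sli4_queue_free() can safely tear down a partially built queue. A standalone sketch of that record-then-allocate discipline (plain C, invented names):

    #include <stdlib.h>

    struct queue {
        size_t page_size;
        size_t page_count;
        void **pages;
    };

    /* Works on fully or partially constructed queues: the size fields
     * are always valid because they are set before any allocation. */
    static void queue_free(struct queue *q)
    {
        size_t i;

        if (!q)
            return;
        for (i = 0; q->pages && i < q->page_count; i++)
            free(q->pages[i]);
        free(q->pages);
        free(q);
    }

    static struct queue *queue_alloc(size_t page_size, size_t page_count)
    {
        struct queue *q = calloc(1, sizeof(*q));
        size_t i;

        if (!q)
            return NULL;
        q->page_size = page_size;      /* record parameters first ... */
        q->page_count = page_count;
        q->pages = calloc(page_count, sizeof(void *));
        if (!q->pages)
            goto fail;
        for (i = 0; i < page_count; i++) {
            q->pages[i] = malloc(page_size);    /* ... then allocate */
            if (!q->pages[i])
                goto fail;    /* queue_free() still knows the sizes */
        }
        return q;
    fail:
        queue_free(q);
        return NULL;
    }

    int main(void)
    {
        struct queue *q = queue_alloc(4096, 4);

        queue_free(q);    /* handles NULL and partial queues alike */
        return 0;
    }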

@ -14299,7 +14282,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
	if (!cq || !eq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;
		hw_page_size = cq->page_size;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
@ -14318,8 +14301,8 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.cqv);
	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
		/* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
		       (cq->page_size / SLI4_PAGE_SIZE));
		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
		       eq->queue_id);
	} else {
@ -14327,6 +14310,18 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
		       eq->queue_id);
	}
	switch (cq->entry_count) {
	case 2048:
	case 4096:
		if (phba->sli4_hba.pc_sli4_params.cqv ==
		    LPFC_Q_CREATE_VERSION_2) {
			cq_create->u.request.context.lpfc_cq_context_count =
				cq->entry_count;
			bf_set(lpfc_cq_context_count,
			       &cq_create->u.request.context,
			       LPFC_CQ_CNT_WORD7);
			break;
		}
		/* Fall Thru */
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0361 Unsupported CQ count: "
@ -14352,7 +14347,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
		break;
	}
	list_for_each_entry(dmabuf, &cq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		memset(dmabuf->virt, 0, cq->page_size);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@ -14433,8 +14428,6 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
	numcq = phba->cfg_nvmet_mrq;
	if (!cqp || !eqp || !numcq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
@ -14465,6 +14458,8 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
			status = -ENOMEM;
			goto out;
		}
		if (!phba->sli4_hba.pc_sli4_params.supported)
			hw_page_size = cq->page_size;

		switch (idx) {
		case 0:
@ -14482,6 +14477,19 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
			bf_set(lpfc_mbx_cq_create_set_num_cq,
			       &cq_set->u.request, numcq);
			switch (cq->entry_count) {
			case 2048:
			case 4096:
				if (phba->sli4_hba.pc_sli4_params.cqv ==
				    LPFC_Q_CREATE_VERSION_2) {
					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
					       &cq_set->u.request,
					       cq->entry_count);
					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
					       &cq_set->u.request,
					       LPFC_CQ_CNT_WORD7);
					break;
				}
				/* Fall Thru */
			default:
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"3118 Bad CQ count. (%d)\n",
@ -14578,6 +14586,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
		cq->host_index = 0;
		cq->hba_index = 0;
		cq->entry_repost = LPFC_CQ_REPOST;
		cq->chann = idx;

		rc = 0;
		list_for_each_entry(dmabuf, &cq->page_list, list) {
@ -14872,12 +14881,13 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
	void __iomem *bar_memmap_p;
	uint32_t db_offset;
	uint16_t pci_barset;
	uint8_t wq_create_version;

	/* sanity check on queue memory */
	if (!wq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;
		hw_page_size = wq->page_size;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
@ -14898,7 +14908,12 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.wqv);

	switch (phba->sli4_hba.pc_sli4_params.wqv) {
	if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT)
		wq_create_version = LPFC_Q_CREATE_VERSION_1;
	else
		wq_create_version = LPFC_Q_CREATE_VERSION_0;

	switch (wq_create_version) {
	case LPFC_Q_CREATE_VERSION_0:
		switch (wq->entry_size) {
		default:
@ -14956,7 +14971,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
		}
		bf_set(lpfc_mbx_wq_create_page_size,
		       &wq_create->u.request_1,
		       LPFC_WQ_PAGE_SIZE_4096);
		       (wq->page_size / SLI4_PAGE_SIZE));
		page = wq_create->u.request_1.page;
		break;
	default:
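
Throughout these create routines the page-size field is now computed instead of hard-coded: it encodes the queue page size in units of SLI4_PAGE_SIZE, which (assuming the usual 4096-byte SLI4 page) gives 4096 / 4096 = 1 for a default page, the value previously wired in, and 16384 / 4096 = 4 for an expanded page (LPFC_EXPANDED_PAGE_SIZE in the lpfc_sli4.h hunk below).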

@ -161,7 +161,6 @@ struct lpfc_queue {
#define LPFC_RELEASE_NOTIFICATION_INTERVAL	32  /* For WQs */
	uint32_t queue_id;	/* Queue ID assigned by the hardware */
	uint32_t assoc_qid;	/* Queue ID associated with, for CQ/WQ/MQ */
	uint32_t page_count;	/* Number of pages allocated for this queue */
	uint32_t host_index;	/* The host's index for putting or getting */
	uint32_t hba_index;	/* The last known hba index for get or put */

@ -169,6 +168,11 @@ struct lpfc_queue {
	struct lpfc_rqb *rqbp;	/* ptr to RQ buffers */

	uint32_t q_mode;
	uint16_t page_count;	/* Number of pages allocated for this queue */
	uint16_t page_size;	/* size of page allocated for this queue */
#define LPFC_EXPANDED_PAGE_SIZE	16384
#define LPFC_DEFAULT_PAGE_SIZE	4096
	uint16_t chann;		/* IO channel this queue is associated with */
	uint16_t db_format;
#define LPFC_DB_RING_FORMAT	0x01
#define LPFC_DB_LIST_FORMAT	0x02
@ -366,9 +370,9 @@ struct lpfc_bmbx {

#define LPFC_EQE_DEF_COUNT	1024
#define LPFC_CQE_DEF_COUNT	1024
#define LPFC_CQE_EXP_COUNT	4096
#define LPFC_WQE_DEF_COUNT	256
#define LPFC_WQE128_DEF_COUNT	128
#define LPFC_WQE128_MAX_COUNT	256
#define LPFC_WQE_EXP_COUNT	1024
#define LPFC_MQE_DEF_COUNT	16
#define LPFC_RQE_DEF_COUNT	512

@ -668,7 +672,6 @@ struct lpfc_sli4_hba {
	struct list_head sp_asynce_work_queue;
	struct list_head sp_fcp_xri_aborted_work_queue;
	struct list_head sp_els_xri_aborted_work_queue;
	struct list_head sp_nvme_xri_aborted_work_queue;
	struct list_head sp_unsol_work_queue;
	struct lpfc_sli4_link link_state;
	struct lpfc_sli4_lnk_info lnk_info;
@ -769,7 +772,7 @@ int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,

void lpfc_sli4_hba_reset(struct lpfc_hba *);
struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
					 uint32_t);
					 uint32_t, uint32_t);
void lpfc_sli4_queue_free(struct lpfc_queue *);
int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
@ -820,7 +823,6 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
			 void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
			       struct sli4_wcqe_xri_aborted *);

@ -20,7 +20,7 @@
 * included with this package.                                     *
 *******************************************************************/

#define LPFC_DRIVER_VERSION "11.4.0.4"
#define LPFC_DRIVER_VERSION "11.4.0.6"
#define LPFC_DRIVER_NAME		"lpfc"

/* Used for SLI 2/3 */

@ -35,8 +35,8 @@
/*
 * MegaRAID SAS Driver meta data
 */
#define MEGASAS_VERSION				"07.703.05.00-rc1"
#define MEGASAS_RELDATE				"October 5, 2017"
#define MEGASAS_VERSION				"07.704.04.00-rc1"
#define MEGASAS_RELDATE				"December 7, 2017"

/*
 * Device IDs
@ -197,6 +197,7 @@ enum MFI_CMD_OP {
	MFI_CMD_ABORT = 0x6,
	MFI_CMD_SMP = 0x7,
	MFI_CMD_STP = 0x8,
	MFI_CMD_NVME = 0x9,
	MFI_CMD_OP_COUNT,
	MFI_CMD_INVALID = 0xff
};
@ -230,7 +231,7 @@ enum MFI_CMD_OP {
/*
 * Global functions
 */
extern u8 MR_ValidateMapInfo(struct megasas_instance *instance);
extern u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id);


/*
@ -1352,7 +1353,13 @@ struct megasas_ctrl_info {

	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u16 reserved:8;
		u16 reserved:2;
		u16 support_nvme_passthru:1;
		u16 support_pl_debug_info:1;
		u16 support_flash_comp_info:1;
		u16 support_host_info:1;
		u16 support_dual_fw_update:1;
		u16 support_ssc_rev3:1;
		u16 fw_swaps_bbu_vpd_info:1;
		u16 support_pd_map_target_id:1;
		u16 support_ses_ctrl_in_multipathcfg:1;
@ -1377,7 +1384,19 @@ struct megasas_ctrl_info {
		 * provide the data in little endian order
		 */
		u16 fw_swaps_bbu_vpd_info:1;
		u16 reserved:8;
		u16 support_ssc_rev3:1;
		/* FW supports CacheCade 3.0, only one SSCD creation allowed */
		u16 support_dual_fw_update:1;
		/* FW supports dual firmware update feature */
		u16 support_host_info:1;
		/* FW supports MR_DCMD_CTRL_HOST_INFO_SET/GET */
		u16 support_flash_comp_info:1;
		/* FW supports MR_DCMD_CTRL_FLASH_COMP_INFO_GET */
		u16 support_pl_debug_info:1;
		/* FW supports retrieval of PL debug information through apps */
		u16 support_nvme_passthru:1;
		/* FW supports NVMe passthru commands */
		u16 reserved:2;
#endif
	} adapter_operations4;
	u8 pad[0x800 - 0x7FE];	/* 0x7FE pad to 2K for expansion */
@ -1630,7 +1649,8 @@ union megasas_sgl_frame {

typedef union _MFI_CAPABILITIES {
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u32 reserved:18;
		u32 reserved:17;
		u32 support_nvme_passthru:1;
		u32 support_64bit_mode:1;
		u32 support_pd_map_target_id:1;
		u32 support_qd_throttling:1;
@ -1660,7 +1680,8 @@ typedef union _MFI_CAPABILITIES {
		u32 support_qd_throttling:1;
		u32 support_pd_map_target_id:1;
		u32 support_64bit_mode:1;
		u32 reserved:18;
		u32 support_nvme_passthru:1;
		u32 reserved:17;
#endif
	} mfi_capabilities;
	__le32 reg;

@ -2188,7 +2209,6 @@ struct megasas_instance {
	struct megasas_evt_detail *evt_detail;
	dma_addr_t evt_detail_h;
	struct megasas_cmd *aen_cmd;
	struct mutex hba_mutex;
	struct semaphore ioctl_sem;

	struct Scsi_Host *host;
@ -2269,6 +2289,7 @@ struct megasas_instance {
	u32 nvme_page_size;
	u8 adapter_type;
	bool consistent_mask_64bit;
	bool support_nvme_passthru;
};
struct MR_LD_VF_MAP {
	u32 size;
@ -181,6 +181,7 @@ static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
|
||||
static u32 support_poll_for_event;
|
||||
u32 megasas_dbg_lvl;
|
||||
static u32 support_device_change;
|
||||
static bool support_nvme_encapsulation;
|
||||
|
||||
/* define lock for aen poll */
|
||||
spinlock_t poll_aen_lock;
|
||||
@ -1952,7 +1953,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
|
||||
}
|
||||
}
|
||||
|
||||
mutex_lock(&instance->hba_mutex);
|
||||
mutex_lock(&instance->reset_mutex);
|
||||
/* Send DCMD to Firmware and cache the information */
|
||||
if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
|
||||
megasas_get_pd_info(instance, sdev);
|
||||
@ -1966,7 +1967,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
|
||||
is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
|
||||
megasas_set_static_target_properties(sdev, is_target_prop);
|
||||
|
||||
mutex_unlock(&instance->hba_mutex);
|
||||
mutex_unlock(&instance->reset_mutex);
|
||||
|
||||
/* This sdev property may change post OCR */
|
||||
megasas_set_dynamic_target_properties(sdev);
|
||||
@ -3122,6 +3123,16 @@ megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
megasas_fw_cmds_outstanding_show(struct device *cdev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct Scsi_Host *shost = class_to_shost(cdev);
|
||||
struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
|
||||
megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
|
||||
static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
|
||||
@ -3132,6 +3143,8 @@ static DEVICE_ATTR(page_size, S_IRUGO,
|
||||
megasas_page_size_show, NULL);
|
||||
static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
|
||||
megasas_ldio_outstanding_show, NULL);
|
||||
static DEVICE_ATTR(fw_cmds_outstanding, S_IRUGO,
|
||||
megasas_fw_cmds_outstanding_show, NULL);
|
||||
|
||||
struct device_attribute *megaraid_host_attrs[] = {
|
||||
&dev_attr_fw_crash_buffer_size,
|
||||
@ -3139,6 +3152,7 @@ struct device_attribute *megaraid_host_attrs[] = {
|
||||
&dev_attr_fw_crash_state,
|
||||
&dev_attr_page_size,
|
||||
&dev_attr_ldio_outstanding,
|
||||
&dev_attr_fw_cmds_outstanding,
|
||||
NULL,
|
||||
};
|

@ -3321,6 +3335,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,

	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_NVME:
		megasas_complete_int_cmd(instance, cmd);
		break;

@ -3331,10 +3346,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
		    && (cmd->frame->dcmd.mbox.b[1] == 1)) {
			fusion->fast_path_io = 0;
			spin_lock_irqsave(instance->host->host_lock, flags);
			status = cmd->frame->hdr.cmd_status;
			instance->map_update_cmd = NULL;
			if (cmd->frame->hdr.cmd_status != 0) {
				if (cmd->frame->hdr.cmd_status !=
				    MFI_STAT_NOT_FOUND)
			if (status != MFI_STAT_OK) {
				if (status != MFI_STAT_NOT_FOUND)
					dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n",
						 cmd->frame->hdr.cmd_status);
				else {
@ -3344,8 +3359,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
							       flags);
					break;
				}
			} else
				instance->map_id++;
			}

			megasas_return_cmd(instance, cmd);

			/*
@ -3353,10 +3368,14 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
			 * Validate Map will set proper value.
			 * Meanwhile all IOs will go as LD IO.
			 */
			if (MR_ValidateMapInfo(instance))
			if (status == MFI_STAT_OK &&
			    (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
				instance->map_id++;
				fusion->fast_path_io = 1;
			else
			} else {
				fusion->fast_path_io = 0;
			}

			megasas_sync_map_info(instance);
			spin_unlock_irqrestore(instance->host->host_lock,
					       flags);
@ -4677,10 +4696,12 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
	       sizeof(struct megasas_ctrl_info));

	if ((instance->adapter_type != MFI_SERIES) &&
	    !instance->mask_interrupts)
	    !instance->mask_interrupts) {
		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
	else
	} else {
		ret = megasas_issue_polled(instance, cmd);
		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
	}

	switch (ret) {
	case DCMD_SUCCESS:
@ -4702,6 +4723,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
			ci->adapterOperations3.useSeqNumJbodFP;
		instance->support_morethan256jbod =
			ci->adapter_operations4.support_pd_map_target_id;
		instance->support_nvme_passthru =
			ci->adapter_operations4.support_nvme_passthru;

		/*Check whether controller is iMR or MR */
		instance->is_imr = (ci->memory_size ? 0 : 1);
@ -4718,6 +4741,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
			 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
		dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
			 instance->secure_jbod_support ? "Yes" : "No");
		dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
			 instance->support_nvme_passthru ? "Yes" : "No");
		break;

	case DCMD_TIMEOUT:
@ -5387,7 +5412,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
		}
		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
			fusion->stream_detect_by_ld[i] =
				kmalloc(sizeof(struct LD_STREAM_DETECT),
				kzalloc(sizeof(struct LD_STREAM_DETECT),
					GFP_KERNEL);
			if (!fusion->stream_detect_by_ld[i]) {
				dev_err(&instance->pdev->dev,
@ -5432,7 +5457,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
		ctrl_info->adapterOperations2.supportUnevenSpans;
	if (instance->UnevenSpanSupport) {
		struct fusion_context *fusion = instance->ctrl_context;
		if (MR_ValidateMapInfo(instance))
		if (MR_ValidateMapInfo(instance, instance->map_id))
			fusion->fast_path_io = 1;
		else
			fusion->fast_path_io = 0;
@ -5581,6 +5606,7 @@ megasas_get_seq_num(struct megasas_instance *instance,
	struct megasas_dcmd_frame *dcmd;
	struct megasas_evt_log_info *el_info;
	dma_addr_t el_info_h = 0;
	int ret;

	cmd = megasas_get_cmd(instance);

@ -5613,26 +5639,29 @@ megasas_get_seq_num(struct megasas_instance *instance,
	megasas_set_dma_settings(instance, dcmd, el_info_h,
				 sizeof(struct megasas_evt_log_info));

	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
		DCMD_SUCCESS) {
		/*
		 * Copy the data back into callers buffer
		 */
		eli->newest_seq_num = el_info->newest_seq_num;
		eli->oldest_seq_num = el_info->oldest_seq_num;
		eli->clear_seq_num = el_info->clear_seq_num;
		eli->shutdown_seq_num = el_info->shutdown_seq_num;
		eli->boot_seq_num = el_info->boot_seq_num;
	} else
		dev_err(&instance->pdev->dev, "DCMD failed "
			"from %s\n", __func__);
	ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
	if (ret != DCMD_SUCCESS) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		goto dcmd_failed;
	}

	/*
	 * Copy the data back into callers buffer
	 */
	eli->newest_seq_num = el_info->newest_seq_num;
	eli->oldest_seq_num = el_info->oldest_seq_num;
	eli->clear_seq_num = el_info->clear_seq_num;
	eli->shutdown_seq_num = el_info->shutdown_seq_num;
	eli->boot_seq_num = el_info->boot_seq_num;

dcmd_failed:
	pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
			    el_info, el_info_h);

	megasas_return_cmd(instance, cmd);

	return 0;
	return ret;
}
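
The reworked megasas_get_seq_num() replaces the success-branch nesting with an early goto to a single cleanup label, so the DMA buffer and command are released on every path and the DCMD status is propagated instead of an unconditional 0. The shape of that error path, reduced to a runnable toy (stand-in names throughout):

    #include <stdio.h>
    #include <stdlib.h>

    static int issue_blocked_cmd(void)
    {
        return 0;    /* pretend DCMD_SUCCESS */
    }

    int main(void)
    {
        int ret;
        char *el_info = calloc(1, 64);    /* stand-in for the DMA buffer */

        if (!el_info)
            return 1;

        ret = issue_blocked_cmd();
        if (ret != 0) {
            fprintf(stderr, "DCMD failed\n");
            goto dcmd_failed;    /* cleanup below still runs */
        }
        /* on success: copy the results back to the caller here */

    dcmd_failed:
        free(el_info);    /* released on success and failure alike */
        return ret;
    }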

/**
@ -6346,7 +6375,6 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
	spin_lock_init(&instance->stream_lock);
	spin_lock_init(&instance->completion_lock);

	mutex_init(&instance->hba_mutex);
	mutex_init(&instance->reset_mutex);

	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
@ -6704,6 +6732,7 @@ megasas_resume(struct pci_dev *pdev)
	 */

	atomic_set(&instance->fw_outstanding, 0);
	atomic_set(&instance->ldio_outstanding, 0);

	/* Now re-enable MSI-X */
	if (instance->msix_vectors) {
@ -6822,7 +6851,6 @@ static void megasas_detach_one(struct pci_dev *pdev)
	u32 pd_seq_map_sz;

	instance = pci_get_drvdata(pdev);
	instance->unload = 1;
	host = instance->host;
	fusion = instance->ctrl_context;

@ -6833,6 +6861,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
	if (instance->fw_crash_state != UNAVAILABLE)
		megasas_free_host_crash_buffer(instance);
	scsi_remove_host(instance->host);
	instance->unload = 1;

	if (megasas_wait_for_adapter_operational(instance))
		goto skip_firing_dcmds;
@ -7087,7 +7116,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
		return -EINVAL;
	}

	if (ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) {
	if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
	    ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
	     !instance->support_nvme_passthru)) {
		dev_err(&instance->pdev->dev,
			"Received invalid ioctl command 0x%x\n",
			ioc->frame.hdr.cmd);
@ -7301,9 +7332,6 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
	struct megasas_iocpacket *ioc;
	struct megasas_instance *instance;
	int error;
	int i;
	unsigned long flags;
	u32 wait_time = MEGASAS_RESET_WAIT_TIME;

	ioc = memdup_user(user_ioc, sizeof(*ioc));
	if (IS_ERR(ioc))
@ -7315,10 +7343,6 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
		goto out_kfree_ioc;
	}

	/* Adjust ioctl wait time for VF mode */
	if (instance->requestorId)
		wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;

	/* Block ioctls in VF mode */
	if (instance->requestorId && !allow_vf_ioctls) {
		error = -ENODEV;
@ -7341,32 +7365,10 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
		goto out_kfree_ioc;
	}

	for (i = 0; i < wait_time; i++) {

		spin_lock_irqsave(&instance->hba_lock, flags);
		if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
			spin_unlock_irqrestore(&instance->hba_lock, flags);
			break;
		}
		spin_unlock_irqrestore(&instance->hba_lock, flags);

		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
			dev_notice(&instance->pdev->dev, "waiting"
				"for controller reset to finish\n");
		}

		msleep(1000);
	}

	spin_lock_irqsave(&instance->hba_lock, flags);
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
		spin_unlock_irqrestore(&instance->hba_lock, flags);

		dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
	if (megasas_wait_for_adapter_operational(instance)) {
		error = -ENODEV;
		goto out_up;
	}
	spin_unlock_irqrestore(&instance->hba_lock, flags);

	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
out_up:
@ -7382,9 +7384,6 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
	struct megasas_instance *instance;
	struct megasas_aen aen;
	int error;
	int i;
	unsigned long flags;
	u32 wait_time = MEGASAS_RESET_WAIT_TIME;

	if (file->private_data != file) {
		printk(KERN_DEBUG "megasas: fasync_helper was not "
@ -7408,32 +7407,8 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
		return -ENODEV;
	}

	for (i = 0; i < wait_time; i++) {

		spin_lock_irqsave(&instance->hba_lock, flags);
		if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
			spin_unlock_irqrestore(&instance->hba_lock,
						flags);
			break;
		}

		spin_unlock_irqrestore(&instance->hba_lock, flags);

		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
			dev_notice(&instance->pdev->dev, "waiting for"
				"controller reset to finish\n");
		}

		msleep(1000);
	}

	spin_lock_irqsave(&instance->hba_lock, flags);
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
		spin_unlock_irqrestore(&instance->hba_lock, flags);
		dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
	if (megasas_wait_for_adapter_operational(instance))
		return -ENODEV;
	}
	spin_unlock_irqrestore(&instance->hba_lock, flags);

	mutex_lock(&instance->reset_mutex);
	error = megasas_register_aen(instance, aen.seq_num,
@ -7613,6 +7588,14 @@ static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
}
static DRIVER_ATTR_RW(dbg_lvl);

static ssize_t
support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
{
	return sprintf(buf, "%u\n", support_nvme_encapsulation);
}

static DRIVER_ATTR_RO(support_nvme_encapsulation);

static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
{
	sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
@ -7801,6 +7784,7 @@ static int __init megasas_init(void)

	support_poll_for_event = 2;
	support_device_change = 1;
	support_nvme_encapsulation = true;

	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));

@ -7850,8 +7834,17 @@ static int __init megasas_init(void)
	if (rval)
		goto err_dcf_support_device_change;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_nvme_encapsulation);
	if (rval)
		goto err_dcf_support_nvme_encapsulation;

	return rval;

err_dcf_support_nvme_encapsulation:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_device_change);

err_dcf_support_device_change:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_dbg_lvl);
@ -7884,6 +7877,8 @@ static void __exit megasas_exit(void)
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_nvme_encapsulation);

	pci_unregister_driver(&megasas_pci_driver);
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");

@ -168,7 +168,7 @@ static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
/*
 * This function will Populate Driver Map using firmware raid map
 */
void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
static int MR_PopulateDrvRaidMap(struct megasas_instance *instance, u64 map_id)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
@ -181,7 +181,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)


	struct MR_DRV_RAID_MAP_ALL *drv_map =
		fusion->ld_drv_map[(instance->map_id & 1)];
		fusion->ld_drv_map[(map_id & 1)];
	struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
	void *raid_map_data = NULL;

@ -190,7 +190,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
	       0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN));

	if (instance->max_raid_mapsize) {
		fw_map_dyn = fusion->ld_map[(instance->map_id & 1)];
		fw_map_dyn = fusion->ld_map[(map_id & 1)];
		desc_table =
		(struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset));
		if (desc_table != fw_map_dyn->raid_map_desc_table)
@ -255,11 +255,11 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)

	} else if (instance->supportmax256vd) {
		fw_map_ext =
			(struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(instance->map_id & 1)];
			(struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(map_id & 1)];
		ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
		if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
			dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map in not valid\n");
			return;
			return 1;
		}

		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
@ -282,9 +282,15 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
			cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT));
	} else {
		fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
			fusion->ld_map[(instance->map_id & 1)];
			fusion->ld_map[(map_id & 1)];
		pFwRaidMap = &fw_map_old->raidMap;
		ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
		if (ld_count > MAX_LOGICAL_DRIVES) {
			dev_dbg(&instance->pdev->dev,
				"LD count exposed in RAID map in not valid\n");
			return 1;
		}

		pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
		pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
@ -300,12 +306,14 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
		       sizeof(struct MR_DEV_HANDLE_INFO) *
		       MAX_RAIDMAP_PHYSICAL_DEVICES);
	}

	return 0;
}

/*
 * This function will validate Map info data provided by FW
 */
u8 MR_ValidateMapInfo(struct megasas_instance *instance)
u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
{
	struct fusion_context *fusion;
	struct MR_DRV_RAID_MAP_ALL *drv_map;
@ -317,11 +325,11 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
	u16 ld;
	u32 expected_size;


	MR_PopulateDrvRaidMap(instance);
	if (MR_PopulateDrvRaidMap(instance, map_id))
		return 0;

	fusion = instance->ctrl_context;
	drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
	drv_map = fusion->ld_drv_map[(map_id & 1)];
	pDrvRaidMap = &drv_map->raidMap;

	lbInfo = fusion->load_balance_info;
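
Passing map_id explicitly makes the double buffering visible: the firmware RAID maps live in two slots and (map_id & 1) selects the active copy, so a new map can be populated and validated while I/O still uses the previous one. A trivial model of the ping-pong indexing:

    #include <stdio.h>

    int main(void)
    {
        const char *maps[2] = { "map slot 0", "map slot 1" };
        unsigned long long map_id;

        for (map_id = 0; map_id < 4; map_id++)
            printf("map_id %llu -> %s\n", map_id, maps[map_id & 1]);
        return 0;
    }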