Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (58 commits)
  [SCSI] zfcp: clear boxed flag on unit reopen.
  [SCSI] zfcp: clear adapter failed flag if an fsf request times out.
  [SCSI] zfcp: rework request ID management.
  [SCSI] zfcp: Fix deadlock between zfcp ERP and SCSI
  [SCSI] zfcp: Locking for req_no and req_seq_no
  [SCSI] zfcp: print S_ID and D_ID with 3 bytes
  [SCSI] ipr: Use PCI-E reset API for new ipr adapter
  [SCSI] qla2xxx: Update version number to 8.01.07-k7.
  [SCSI] qla2xxx: Add MSI support.
  [SCSI] qla2xxx: Correct pci_set_msi() usage semantics.
  [SCSI] qla2xxx: Attempt to stop firmware only if it had been previously executed.
  [SCSI] qla2xxx: Honor NVRAM port-down-retry-count settings.
  [SCSI] qla2xxx: Error-out during probe() if we're unable to complete HBA initialization.
  [SCSI] zfcp: Stop system after memory corruption
  [SCSI] mesh: cleanup variable usage in interrupt handler
  [SCSI] megaraid: replace yield() with cond_resched()
  [SCSI] megaraid: fix warnings when CONFIG_PROC_FS=n
  [SCSI] aacraid: correct SUN products to README
  [SCSI] aacraid: superfluous adapter reset for IBM 8 series ServeRAID controllers
  [SCSI] aacraid: kexec fix (reset interrupt handler)
  ...
Linus Torvalds 2007-05-08 20:32:16 -07:00
commit 6ec129c3a2
53 changed files with 2406 additions and 2154 deletions


@ -59,6 +59,15 @@ Who: Dan Dennedy <dan@dennedy.org>, Stefan Richter <stefanr@s5r6.in-berlin.de>
---------------------------
What: old NCR53C9x driver
When: October 2007
Why: Replaced by the much better esp_scsi driver. Actual low-level
driver can be ported over almost trivially.
Who: David Miller <davem@davemloft.net>
Christoph Hellwig <hch@lst.de>
---------------------------
What: Video4Linux API 1 ioctls and video_decoder.h from Video devices.
When: December 2006
Why: V4L1 API was replaced by the V4L2 API during the migration from 2.4 to 2.6


@ -98,8 +98,8 @@ Supported Cards/Chipsets
9005:0285:9005:02b0 (Sunrise Lake ARK)
9005:0285:9005:02b1 Adaptec (Voodoo 8 internal 8 external)
9005:0285:108e:7aac SUN STK RAID REM (Voodoo44 Coyote)
9005:0285:108e:0286 SUN SG-XPCIESAS-R-IN (Cougar)
9005:0285:108e:0287 SUN SG-XPCIESAS-R-EX (Prometheus)
9005:0285:108e:0286 SUN STK RAID INT (Cougar)
9005:0285:108e:0287 SUN STK RAID EXT (Prometheus)
People
-------------------------


@ -118,97 +118,32 @@ _zfcp_hex_dump(char *addr, int count)
#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
static int zfcp_reqlist_init(struct zfcp_adapter *adapter)
static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter)
{
int i;
int idx;
adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head),
GFP_KERNEL);
if (!adapter->req_list)
return -ENOMEM;
for (i=0; i<REQUEST_LIST_SIZE; i++)
INIT_LIST_HEAD(&adapter->req_list[i]);
for (idx = 0; idx < REQUEST_LIST_SIZE; idx++)
INIT_LIST_HEAD(&adapter->req_list[idx]);
return 0;
}
static void zfcp_reqlist_free(struct zfcp_adapter *adapter)
{
struct zfcp_fsf_req *request, *tmp;
unsigned int i;
for (i=0; i<REQUEST_LIST_SIZE; i++) {
if (list_empty(&adapter->req_list[i]))
continue;
list_for_each_entry_safe(request, tmp,
&adapter->req_list[i], list)
list_del(&request->list);
}
kfree(adapter->req_list);
}
void zfcp_reqlist_add(struct zfcp_adapter *adapter,
struct zfcp_fsf_req *fsf_req)
{
unsigned int i;
i = fsf_req->req_id % REQUEST_LIST_SIZE;
list_add_tail(&fsf_req->list, &adapter->req_list[i]);
}
void zfcp_reqlist_remove(struct zfcp_adapter *adapter, unsigned long req_id)
{
struct zfcp_fsf_req *request, *tmp;
unsigned int i, counter;
u64 dbg_tmp[2];
i = req_id % REQUEST_LIST_SIZE;
BUG_ON(list_empty(&adapter->req_list[i]));
counter = 0;
list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list) {
if (request->req_id == req_id) {
dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active);
dbg_tmp[1] = (u64) counter;
debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16);
list_del(&request->list);
break;
}
counter++;
}
}
struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *adapter,
unsigned long req_id)
{
struct zfcp_fsf_req *request, *tmp;
unsigned int i;
/* 0 is reserved as an invalid req_id */
if (req_id == 0)
return NULL;
i = req_id % REQUEST_LIST_SIZE;
list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list)
if (request->req_id == req_id)
return request;
return NULL;
}
int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
{
unsigned int i;
unsigned int idx;
for (i=0; i<REQUEST_LIST_SIZE; i++)
if (!list_empty(&adapter->req_list[i]))
for (idx = 0; idx < REQUEST_LIST_SIZE; idx++)
if (!list_empty(&adapter->req_list[idx]))
return 0;
return 1;
}
@ -913,6 +848,8 @@ zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
unit->sysfs_device.release = zfcp_sysfs_unit_release;
dev_set_drvdata(&unit->sysfs_device, unit);
init_waitqueue_head(&unit->scsi_scan_wq);
/* mark unit unusable as long as sysfs registration is not complete */
atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
@ -1104,7 +1041,7 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
/* initialize list of fsf requests */
spin_lock_init(&adapter->req_list_lock);
retval = zfcp_reqlist_init(adapter);
retval = zfcp_reqlist_alloc(adapter);
if (retval) {
ZFCP_LOG_INFO("request list initialization failed\n");
goto failed_low_mem_buffers;
@ -1165,6 +1102,7 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev);
sysfs_failed:
dev_set_drvdata(&ccw_device->dev, NULL);
zfcp_reqlist_free(adapter);
failed_low_mem_buffers:
zfcp_free_low_mem_buffers(adapter);
if (qdio_free(ccw_device) != 0)
@ -1497,7 +1435,7 @@ zfcp_fsf_incoming_els_plogi(struct zfcp_adapter *adapter,
if (!port || (port->wwpn != (*(wwn_t *) &els_plogi->serv_param.wwpn))) {
ZFCP_LOG_DEBUG("ignored incoming PLOGI for nonexisting port "
"with d_id 0x%08x on adapter %s\n",
"with d_id 0x%06x on adapter %s\n",
status_buffer->d_id,
zfcp_get_busid_by_adapter(adapter));
} else {
@ -1522,7 +1460,7 @@ zfcp_fsf_incoming_els_logo(struct zfcp_adapter *adapter,
if (!port || (port->wwpn != els_logo->nport_wwpn)) {
ZFCP_LOG_DEBUG("ignored incoming LOGO for nonexisting port "
"with d_id 0x%08x on adapter %s\n",
"with d_id 0x%06x on adapter %s\n",
status_buffer->d_id,
zfcp_get_busid_by_adapter(adapter));
} else {
@ -1704,7 +1642,7 @@ static void zfcp_ns_gid_pn_handler(unsigned long data)
/* looks like a valid d_id */
port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
ZFCP_LOG_DEBUG("adapter %s: wwpn=0x%016Lx ---> d_id=0x%08x\n",
ZFCP_LOG_DEBUG("adapter %s: wwpn=0x%016Lx ---> d_id=0x%06x\n",
zfcp_get_busid_by_port(port), port->wwpn, port->d_id);
goto out;
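The %08x to %06x conversions in this and several later hunks reflect that Fibre Channel S_IDs and D_IDs are 24-bit addresses, so a 6-digit hex field prints exactly 3 bytes with no dead leading digits. A trivial illustration, hypothetical userspace code rather than anything from the patch:

#include <stdio.h>

int main(void)
{
	unsigned int d_id = 0x65ab01;		/* 24-bit FC destination ID */

	printf("d_id=0x%08x\n", d_id);		/* 0x0065ab01, two dead digits */
	printf("d_id=0x%06x\n", d_id);		/* 0x65ab01, exactly 3 bytes   */
	return 0;
}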


@ -637,6 +637,7 @@ do { \
#define ZFCP_STATUS_UNIT_SHARED 0x00000004
#define ZFCP_STATUS_UNIT_READONLY 0x00000008
#define ZFCP_STATUS_UNIT_REGISTERED 0x00000010
#define ZFCP_STATUS_UNIT_SCSI_WORK_PENDING 0x00000020
/* FSF request status (this does not have a common part) */
#define ZFCP_STATUS_FSFREQ_NOT_INIT 0x00000000
@ -980,6 +981,10 @@ struct zfcp_unit {
struct scsi_device *device; /* scsi device struct pointer */
struct zfcp_erp_action erp_action; /* pending error recovery */
atomic_t erp_counter;
wait_queue_head_t scsi_scan_wq; /* can be used to wait until
all scsi_scan_target
requests have been
completed. */
};
/* FSF request */
@ -1084,6 +1089,42 @@ extern void _zfcp_hex_dump(char *, int);
#define zfcp_get_busid_by_port(port) (zfcp_get_busid_by_adapter(port->adapter))
#define zfcp_get_busid_by_unit(unit) (zfcp_get_busid_by_port(unit->port))
/*
* Helper functions for request ID management.
*/
static inline int zfcp_reqlist_hash(unsigned long req_id)
{
return req_id % REQUEST_LIST_SIZE;
}
static inline void zfcp_reqlist_add(struct zfcp_adapter *adapter,
struct zfcp_fsf_req *fsf_req)
{
unsigned int idx;
idx = zfcp_reqlist_hash(fsf_req->req_id);
list_add_tail(&fsf_req->list, &adapter->req_list[idx]);
}
static inline void zfcp_reqlist_remove(struct zfcp_adapter *adapter,
struct zfcp_fsf_req *fsf_req)
{
list_del(&fsf_req->list);
}
static inline struct zfcp_fsf_req *
zfcp_reqlist_find(struct zfcp_adapter *adapter, unsigned long req_id)
{
struct zfcp_fsf_req *request;
unsigned int idx;
idx = zfcp_reqlist_hash(req_id);
list_for_each_entry(request, &adapter->req_list[idx], list)
if (request->req_id == req_id)
return request;
return NULL;
}
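These helpers make the request list a fixed-size open hash table: a request ID is reduced to a bucket index by a modulo, and add/remove/find touch only that bucket's list. A rough userspace sketch of the same structure, with illustrative names and a made-up bucket count:

#include <stdio.h>
#include <stdlib.h>

#define REQUEST_LIST_SIZE 128			/* bucket count, illustrative */

struct req {
	unsigned long req_id;
	struct req *next;			/* bucket chain */
};

static struct req *buckets[REQUEST_LIST_SIZE];

static unsigned int reqlist_hash(unsigned long req_id)
{
	return req_id % REQUEST_LIST_SIZE;
}

static void reqlist_add(struct req *r)
{
	unsigned int idx = reqlist_hash(r->req_id);

	r->next = buckets[idx];
	buckets[idx] = r;
}

static struct req *reqlist_find(unsigned long req_id)
{
	struct req *r;

	for (r = buckets[reqlist_hash(req_id)]; r; r = r->next)
		if (r->req_id == req_id)
			return r;
	return NULL;				/* unknown ID */
}

int main(void)
{
	struct req a = { .req_id = 42 };

	reqlist_add(&a);
	printf("found: %d\n", reqlist_find(42) != NULL);
	return 0;
}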
/*
* functions needed for reference/usage counting
*/


@ -179,7 +179,7 @@ static void zfcp_close_fsf(struct zfcp_adapter *adapter)
static void zfcp_fsf_request_timeout_handler(unsigned long data)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
zfcp_erp_adapter_reopen(adapter, 0);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
}
void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout)
@ -342,9 +342,9 @@ zfcp_erp_adisc(struct zfcp_port *port)
adisc->wwpn = fc_host_port_name(adapter->scsi_host);
adisc->wwnn = fc_host_node_name(adapter->scsi_host);
adisc->nport_id = fc_host_port_id(adapter->scsi_host);
ZFCP_LOG_INFO("ADISC request from s_id 0x%08x to d_id 0x%08x "
ZFCP_LOG_INFO("ADISC request from s_id 0x%06x to d_id 0x%06x "
"(wwpn=0x%016Lx, wwnn=0x%016Lx, "
"hard_nport_id=0x%08x, nport_id=0x%08x)\n",
"hard_nport_id=0x%06x, nport_id=0x%06x)\n",
adisc->nport_id, send_els->d_id, (wwn_t) adisc->wwpn,
(wwn_t) adisc->wwnn, adisc->hard_nport_id,
adisc->nport_id);
@ -352,7 +352,7 @@ zfcp_erp_adisc(struct zfcp_port *port)
retval = zfcp_fsf_send_els(send_els);
if (retval != 0) {
ZFCP_LOG_NORMAL("error: initiation of Send ELS failed for port "
"0x%08x on adapter %s\n", send_els->d_id,
"0x%06x on adapter %s\n", send_els->d_id,
zfcp_get_busid_by_adapter(adapter));
goto freemem;
}
@ -398,7 +398,7 @@ zfcp_erp_adisc_handler(unsigned long data)
if (send_els->status != 0) {
ZFCP_LOG_NORMAL("ELS request rejected/timed out, "
"force physical port reopen "
"(adapter %s, port d_id=0x%08x)\n",
"(adapter %s, port d_id=0x%06x)\n",
zfcp_get_busid_by_adapter(adapter), d_id);
debug_text_event(adapter->erp_dbf, 3, "forcreop");
if (zfcp_erp_port_forced_reopen(port, 0))
@ -411,9 +411,9 @@ zfcp_erp_adisc_handler(unsigned long data)
adisc = zfcp_sg_to_address(send_els->resp);
ZFCP_LOG_INFO("ADISC response from d_id 0x%08x to s_id "
"0x%08x (wwpn=0x%016Lx, wwnn=0x%016Lx, "
"hard_nport_id=0x%08x, nport_id=0x%08x)\n",
ZFCP_LOG_INFO("ADISC response from d_id 0x%06x to s_id "
"0x%06x (wwpn=0x%016Lx, wwnn=0x%016Lx, "
"hard_nport_id=0x%06x, nport_id=0x%06x)\n",
d_id, fc_host_port_id(adapter->scsi_host),
(wwn_t) adisc->wwpn, (wwn_t) adisc->wwnn,
adisc->hard_nport_id, adisc->nport_id);
@ -847,8 +847,7 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
if (erp_action->fsf_req) {
/* take lock to ensure that request is not deleted meanwhile */
spin_lock(&adapter->req_list_lock);
if (zfcp_reqlist_ismember(adapter,
erp_action->fsf_req->req_id)) {
if (zfcp_reqlist_find(adapter, erp_action->fsf_req->req_id)) {
/* fsf_req still exists */
debug_text_event(adapter->erp_dbf, 3, "a_ca_req");
debug_event(adapter->erp_dbf, 3, &erp_action->fsf_req,
@ -1377,7 +1376,7 @@ zfcp_erp_port_failed(struct zfcp_port *port)
if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
ZFCP_LOG_NORMAL("port erp failed (adapter %s, "
"port d_id=0x%08x)\n",
"port d_id=0x%06x)\n",
zfcp_get_busid_by_port(port), port->d_id);
else
ZFCP_LOG_NORMAL("port erp failed (adapter %s, wwpn=0x%016Lx)\n",
@ -1591,6 +1590,62 @@ zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter, int result)
return result;
}
struct zfcp_erp_add_work {
struct zfcp_unit *unit;
struct work_struct work;
};
/**
* zfcp_erp_scsi_scan
* @data: pointer to a struct zfcp_erp_add_work
*
* Registers a logical unit with the SCSI stack.
*/
static void zfcp_erp_scsi_scan(struct work_struct *work)
{
struct zfcp_erp_add_work *p =
container_of(work, struct zfcp_erp_add_work, work);
struct zfcp_unit *unit = p->unit;
struct fc_rport *rport = unit->port->rport;
scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
unit->scsi_lun, 0);
atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
wake_up(&unit->scsi_scan_wq);
zfcp_unit_put(unit);
kfree(p);
}
/**
* zfcp_erp_schedule_work
* @unit: pointer to unit which should be registered with SCSI stack
*
* Schedules work which registers a unit with the SCSI stack
*/
static void
zfcp_erp_schedule_work(struct zfcp_unit *unit)
{
struct zfcp_erp_add_work *p;
p = kmalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
ZFCP_LOG_NORMAL("error: Out of resources. Could not register "
"the FCP-LUN 0x%Lx connected to "
"the port with WWPN 0x%Lx connected to "
"the adapter %s with the SCSI stack.\n",
unit->fcp_lun,
unit->port->wwpn,
zfcp_get_busid_by_unit(unit));
return;
}
zfcp_unit_get(unit);
memset(p, 0, sizeof(*p));
atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
INIT_WORK(&p->work, zfcp_erp_scsi_scan);
p->unit = unit;
schedule_work(&p->work);
}
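zfcp_erp_scsi_scan() and zfcp_erp_schedule_work() follow the standard workqueue pattern for pushing a sleeping call (here scsi_scan_target()) out of the current context: embed a work_struct next to the payload, queue the work item, and recover the payload in the handler via container_of(). Reduced to its skeleton, as a kernel-style sketch rather than the driver's code:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_work {
	struct work_struct work;
	void *payload;				/* whatever the handler needs */
};

static void my_handler(struct work_struct *work)
{
	struct my_work *p = container_of(work, struct my_work, work);

	/* runs in process context, may sleep, e.g. scan a target */
	kfree(p);
}

static int defer(void *payload)
{
	struct my_work *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return -ENOMEM;
	p->payload = payload;
	INIT_WORK(&p->work, my_handler);
	schedule_work(&p->work);
	return 0;
}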
/*
* function:
*
@ -2401,7 +2456,7 @@ zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *erp_action)
retval = ZFCP_ERP_FAILED;
}
} else {
ZFCP_LOG_DEBUG("port 0x%016Lx has d_id=0x%08x -> "
ZFCP_LOG_DEBUG("port 0x%016Lx has d_id=0x%06x -> "
"trying open\n", port->wwpn, port->d_id);
retval = zfcp_erp_port_strategy_open_port(erp_action);
}
@ -2441,7 +2496,7 @@ zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *erp_action)
case ZFCP_ERP_STEP_UNINITIALIZED:
case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
case ZFCP_ERP_STEP_PORT_CLOSING:
ZFCP_LOG_DEBUG("port 0x%016Lx has d_id=0x%08x -> trying open\n",
ZFCP_LOG_DEBUG("port 0x%016Lx has d_id=0x%06x -> trying open\n",
port->wwpn, port->d_id);
retval = zfcp_erp_port_strategy_open_port(erp_action);
break;
@ -3092,9 +3147,9 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
&& port->rport) {
atomic_set_mask(ZFCP_STATUS_UNIT_REGISTERED,
&unit->status);
scsi_scan_target(&port->rport->dev, 0,
port->rport->scsi_target_id,
unit->scsi_lun, 0);
if (atomic_test_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING,
&unit->status) == 0)
zfcp_erp_schedule_work(unit);
}
zfcp_unit_put(unit);
break;
@ -3121,7 +3176,7 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
zfcp_get_busid_by_port(port),
port->wwpn);
else {
scsi_flush_work(adapter->scsi_host);
scsi_target_unblock(&port->rport->dev);
port->rport->maxframe_size = port->maxframe_size;
port->rport->supported_classes =
port->supported_classes;


@ -184,10 +184,6 @@ extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
unsigned long);
extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
struct scsi_cmnd *);
extern void zfcp_reqlist_add(struct zfcp_adapter *, struct zfcp_fsf_req *);
extern void zfcp_reqlist_remove(struct zfcp_adapter *, unsigned long);
extern struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *,
unsigned long);
extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
#endif /* ZFCP_EXT_H */


@ -828,7 +828,7 @@ zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *fsf_req)
if (!port || (port->d_id != (status_buffer->d_id & ZFCP_DID_MASK))) {
ZFCP_LOG_NORMAL("bug: Reopen port indication received for"
"nonexisting port with d_id 0x%08x on "
"nonexisting port with d_id 0x%06x on "
"adapter %s. Ignored.\n",
status_buffer->d_id & ZFCP_DID_MASK,
zfcp_get_busid_by_adapter(adapter));
@ -853,7 +853,7 @@ zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *fsf_req)
&status_buffer->status_subtype, sizeof (u32));
ZFCP_LOG_NORMAL("bug: Undefined status subtype received "
"for a reopen indication on port with "
"d_id 0x%08x on the adapter %s. "
"d_id 0x%06x on the adapter %s. "
"Ignored. (debug info 0x%x)\n",
status_buffer->d_id,
zfcp_get_busid_by_adapter(adapter),
@ -1156,7 +1156,7 @@ zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
}
ZFCP_LOG_DEBUG("Abort FCP Command request initiated "
"(adapter%s, port d_id=0x%08x, "
"(adapter%s, port d_id=0x%06x, "
"unit x%016Lx, old_req_id=0x%lx)\n",
zfcp_get_busid_by_adapter(adapter),
unit->port->d_id,
@ -1554,7 +1554,7 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
case FSF_ACCESS_DENIED:
ZFCP_LOG_NORMAL("access denied, cannot send generic service "
"command (adapter %s, port d_id=0x%08x)\n",
"command (adapter %s, port d_id=0x%06x)\n",
zfcp_get_busid_by_port(port), port->d_id);
for (counter = 0; counter < 2; counter++) {
subtable = header->fsf_status_qual.halfword[counter * 2];
@ -1576,7 +1576,7 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
case FSF_GENERIC_COMMAND_REJECTED:
ZFCP_LOG_INFO("generic service command rejected "
"(adapter %s, port d_id=0x%08x)\n",
"(adapter %s, port d_id=0x%06x)\n",
zfcp_get_busid_by_port(port), port->d_id);
ZFCP_LOG_INFO("status qualifier:\n");
ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
@ -1602,7 +1602,7 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
case FSF_PORT_BOXED:
ZFCP_LOG_INFO("port needs to be reopened "
"(adapter %s, port d_id=0x%08x)\n",
"(adapter %s, port d_id=0x%06x)\n",
zfcp_get_busid_by_port(port), port->d_id);
debug_text_event(adapter->erp_dbf, 2, "fsf_s_pboxed");
zfcp_erp_port_boxed(port);
@ -1683,7 +1683,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
NULL, &lock_flags, &fsf_req);
if (ret < 0) {
ZFCP_LOG_INFO("error: creation of ELS request failed "
"(adapter %s, port d_id: 0x%08x)\n",
"(adapter %s, port d_id: 0x%06x)\n",
zfcp_get_busid_by_adapter(adapter), d_id);
goto failed_req;
}
@ -1708,7 +1708,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
ZFCP_MAX_SBALS_PER_ELS_REQ);
if (bytes <= 0) {
ZFCP_LOG_INFO("error: creation of ELS request failed "
"(adapter %s, port d_id: 0x%08x)\n",
"(adapter %s, port d_id: 0x%06x)\n",
zfcp_get_busid_by_adapter(adapter), d_id);
if (bytes == 0) {
ret = -ENOMEM;
@ -1725,7 +1725,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
ZFCP_MAX_SBALS_PER_ELS_REQ);
if (bytes <= 0) {
ZFCP_LOG_INFO("error: creation of ELS request failed "
"(adapter %s, port d_id: 0x%08x)\n",
"(adapter %s, port d_id: 0x%06x)\n",
zfcp_get_busid_by_adapter(adapter), d_id);
if (bytes == 0) {
ret = -ENOMEM;
@ -1739,7 +1739,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
/* reject request */
ZFCP_LOG_INFO("error: microcode does not support chained SBALs"
", ELS request too big (adapter %s, "
"port d_id: 0x%08x)\n",
"port d_id: 0x%06x)\n",
zfcp_get_busid_by_adapter(adapter), d_id);
ret = -EOPNOTSUPP;
goto failed_send;
@ -1760,13 +1760,13 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
ret = zfcp_fsf_req_send(fsf_req);
if (ret) {
ZFCP_LOG_DEBUG("error: initiation of ELS request failed "
"(adapter %s, port d_id: 0x%08x)\n",
"(adapter %s, port d_id: 0x%06x)\n",
zfcp_get_busid_by_adapter(adapter), d_id);
goto failed_send;
}
ZFCP_LOG_DEBUG("ELS request initiated (adapter %s, port d_id: "
"0x%08x)\n", zfcp_get_busid_by_adapter(adapter), d_id);
"0x%06x)\n", zfcp_get_busid_by_adapter(adapter), d_id);
goto out;
failed_send:
@ -1859,7 +1859,7 @@ static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
case FSF_ELS_COMMAND_REJECTED:
ZFCP_LOG_INFO("ELS has been rejected because command filter "
"prohibited sending "
"(adapter: %s, port d_id: 0x%08x)\n",
"(adapter: %s, port d_id: 0x%06x)\n",
zfcp_get_busid_by_adapter(adapter), d_id);
break;
@ -1907,7 +1907,7 @@ static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
case FSF_ACCESS_DENIED:
ZFCP_LOG_NORMAL("access denied, cannot send ELS command "
"(adapter %s, port d_id=0x%08x)\n",
"(adapter %s, port d_id=0x%06x)\n",
zfcp_get_busid_by_adapter(adapter), d_id);
for (counter = 0; counter < 2; counter++) {
subtable = header->fsf_status_qual.halfword[counter * 2];
@ -2070,7 +2070,7 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
ZFCP_LOG_NORMAL("The adapter %s reported the following characteristics:\n"
"WWNN 0x%016Lx, "
"WWPN 0x%016Lx, "
"S_ID 0x%08x,\n"
"S_ID 0x%06x,\n"
"adapter version 0x%x, "
"LIC version 0x%x, "
"FC link speed %d Gb/s\n",
@ -3043,6 +3043,7 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
queue_designator = &header->fsf_status_qual.fsf_queue_designator;
atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
ZFCP_STATUS_COMMON_ACCESS_BOXED |
ZFCP_STATUS_UNIT_SHARED |
ZFCP_STATUS_UNIT_READONLY,
&unit->status);
@ -4645,23 +4646,22 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
fsf_req->adapter = adapter;
fsf_req->fsf_command = fsf_cmd;
INIT_LIST_HEAD(&fsf_req->list);
/* this is serialized (we are holding req_queue-lock of adapter */
if (adapter->req_no == 0)
adapter->req_no++;
fsf_req->req_id = adapter->req_no++;
init_timer(&fsf_req->timer);
zfcp_fsf_req_qtcb_init(fsf_req);
/* initialize waitqueue which may be used to wait on
this request completion */
init_waitqueue_head(&fsf_req->completion_wq);
ret = zfcp_fsf_req_sbal_get(adapter, req_flags, lock_flags);
if(ret < 0) {
if (ret < 0)
goto failed_sbals;
}
/* this is serialized (we are holding req_queue-lock of adapter) */
if (adapter->req_no == 0)
adapter->req_no++;
fsf_req->req_id = adapter->req_no++;
zfcp_fsf_req_qtcb_init(fsf_req);
/*
* We hold queue_lock here. Check if QDIOUP is set and let request fail
@ -4788,7 +4788,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req)
retval = -EIO;
del_timer(&fsf_req->timer);
spin_lock(&adapter->req_list_lock);
zfcp_reqlist_remove(adapter, fsf_req->req_id);
zfcp_reqlist_remove(adapter, fsf_req);
spin_unlock(&adapter->req_list_lock);
/* undo changes in request queue made for this request */
zfcp_qdio_zero_sbals(req_queue->buffer,


@ -283,10 +283,10 @@ zfcp_qdio_request_handler(struct ccw_device *ccw_device,
}
/**
* zfcp_qdio_reqid_check - checks for valid reqids or unsolicited status
* zfcp_qdio_reqid_check - checks for valid reqids.
*/
static int zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
unsigned long req_id)
static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
unsigned long req_id)
{
struct zfcp_fsf_req *fsf_req;
unsigned long flags;
@ -294,23 +294,22 @@ static int zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
debug_long_event(adapter->erp_dbf, 4, req_id);
spin_lock_irqsave(&adapter->req_list_lock, flags);
fsf_req = zfcp_reqlist_ismember(adapter, req_id);
fsf_req = zfcp_reqlist_find(adapter, req_id);
if (!fsf_req) {
spin_unlock_irqrestore(&adapter->req_list_lock, flags);
ZFCP_LOG_NORMAL("error: unknown request id (%ld).\n", req_id);
zfcp_erp_adapter_reopen(adapter, 0);
return -EINVAL;
}
if (!fsf_req)
/*
* Unknown request means that we potentially have memory
* corruption and must stop the machine immediately.
*/
panic("error: unknown request id (%ld) on adapter %s.\n",
req_id, zfcp_get_busid_by_adapter(adapter));
zfcp_reqlist_remove(adapter, req_id);
zfcp_reqlist_remove(adapter, fsf_req);
atomic_dec(&adapter->reqs_active);
spin_unlock_irqrestore(&adapter->req_list_lock, flags);
/* finish the FSF request */
zfcp_fsf_req_complete(fsf_req);
return 0;
}
/*
@ -374,27 +373,9 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device,
/* look for QDIO request identifiers in SB */
buffere = &buffer->element[buffere_index];
retval = zfcp_qdio_reqid_check(adapter,
(unsigned long) buffere->addr);
zfcp_qdio_reqid_check(adapter,
(unsigned long) buffere->addr);
if (retval) {
ZFCP_LOG_NORMAL("bug: unexpected inbound "
"packet on adapter %s "
"(reqid=0x%lx, "
"first_element=%d, "
"elements_processed=%d)\n",
zfcp_get_busid_by_adapter(adapter),
(unsigned long) buffere->addr,
first_element,
elements_processed);
ZFCP_LOG_NORMAL("hex dump of inbound buffer "
"at address %p "
"(buffer_index=%d, "
"buffere_index=%d)\n", buffer,
buffer_index, buffere_index);
ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
(char *) buffer, SBAL_SIZE);
}
/*
* A single used SBALE per inbound SBALE has been
* implemented by QDIO so far. Hope they will


@ -22,6 +22,7 @@
#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
#include "zfcp_ext.h"
#include <asm/atomic.h>
static void zfcp_scsi_slave_destroy(struct scsi_device *sdp);
static int zfcp_scsi_slave_alloc(struct scsi_device *sdp);
@ -179,6 +180,10 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
if (unit) {
zfcp_erp_wait(unit->port->adapter);
wait_event(unit->scsi_scan_wq,
atomic_test_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING,
&unit->status) == 0);
atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status);
sdpnt->hostdata = NULL;
unit->device = NULL;
@ -402,8 +407,8 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
/* Check whether corresponding fsf_req is still pending */
spin_lock(&adapter->req_list_lock);
fsf_req = zfcp_reqlist_ismember(adapter, (unsigned long)
scpnt->host_scribble);
fsf_req = zfcp_reqlist_find(adapter,
(unsigned long) scpnt->host_scribble);
spin_unlock(&adapter->req_list_lock);
if (!fsf_req) {
write_unlock_irqrestore(&adapter->abort_lock, flags);


@ -387,12 +387,11 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
* Ok now init the communication subsystem
*/
dev->queues = kmalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
dev->queues = kzalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
if (dev->queues == NULL) {
printk(KERN_ERR "Error could not allocate comm region.\n");
return NULL;
}
memset(dev->queues, 0, sizeof(struct aac_queue_block));
if (aac_comm_init(dev)<0){
kfree(dev->queues);
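This hunk and several below are the same mechanical conversion: kmalloc() followed by memset(..., 0, ...) becomes a single kzalloc(), which returns already-zeroed memory. Schematically, with a hypothetical struct:

	/* before: two steps, and the memset is easy to forget */
	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	memset(p, 0, sizeof(*p));

	/* after: one call, identical semantics */
	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;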


@ -1223,13 +1223,11 @@ int aac_check_health(struct aac_dev * aac)
* Warning: no sleep allowed while
* holding spinlock
*/
hw_fib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
fib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
if (fib && hw_fib) {
struct aac_aifcmd * aif;
memset(hw_fib, 0, sizeof(struct hw_fib));
memset(fib, 0, sizeof(struct fib));
fib->hw_fib_va = hw_fib;
fib->dev = aac;
aac_fib_init(fib);


@ -248,16 +248,14 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
* manage the linked lists.
*/
if ((!dev->aif_thread)
|| (!(fib = kmalloc(sizeof(struct fib),GFP_ATOMIC))))
|| (!(fib = kzalloc(sizeof(struct fib),GFP_ATOMIC))))
return 1;
if (!(hw_fib = kmalloc(sizeof(struct hw_fib),GFP_ATOMIC))) {
if (!(hw_fib = kzalloc(sizeof(struct hw_fib),GFP_ATOMIC))) {
kfree (fib);
return 1;
}
memset(hw_fib, 0, sizeof(struct hw_fib));
memcpy(hw_fib, (struct hw_fib *)(((ptrdiff_t)(dev->regs.sa)) +
(index & ~0x00000002L)), sizeof(struct hw_fib));
memset(fib, 0, sizeof(struct fib));
INIT_LIST_HEAD(&fib->fiblink);
fib->type = FSAFS_NTC_FIB_CONTEXT;
fib->size = sizeof(struct fib);


@ -539,8 +539,10 @@ int _aac_rx_init(struct aac_dev *dev)
}
/* Failure to reset here is an option ... */
dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
if ((((status & 0xff) != 0xff) || reset_devices) &&
if ((((status & 0x0c) != 0x0c) || reset_devices) &&
!aac_rx_restart_adapter(dev, 0))
++restart;
/*


@ -319,10 +319,9 @@ ch_readconfig(scsi_changer *ch)
int result,id,lun,i;
u_int elem;
buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
buffer = kzalloc(512, GFP_KERNEL | GFP_DMA);
if (!buffer)
return -ENOMEM;
memset(buffer,0,512);
memset(cmd,0,sizeof(cmd));
cmd[0] = MODE_SENSE;
@ -530,10 +529,9 @@ ch_set_voltag(scsi_changer *ch, u_int elem,
u_char *buffer;
int result;
buffer = kmalloc(512, GFP_KERNEL);
buffer = kzalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
memset(buffer,0,512);
dprintk("%s %s voltag: 0x%x => \"%s\"\n",
clear ? "clear" : "set",
@ -922,11 +920,10 @@ static int ch_probe(struct device *dev)
if (sd->type != TYPE_MEDIUM_CHANGER)
return -ENODEV;
ch = kmalloc(sizeof(*ch), GFP_KERNEL);
ch = kzalloc(sizeof(*ch), GFP_KERNEL);
if (NULL == ch)
return -ENOMEM;
memset(ch,0,sizeof(*ch));
ch->minor = ch_devcount;
sprintf(ch->name,"ch%d",ch->minor);
mutex_init(&ch->lock);


@ -1308,13 +1308,12 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
schedule_timeout_uninterruptible(1);
} while (m == EMPTY_QUEUE);
status = kmalloc(4, GFP_KERNEL|ADDR32);
status = kzalloc(4, GFP_KERNEL|ADDR32);
if(status == NULL) {
adpt_send_nop(pHba, m);
printk(KERN_ERR"IOP reset failed - no free memory.\n");
return -ENOMEM;
}
memset(status,0,4);
msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
@ -1504,21 +1503,19 @@ static int adpt_i2o_parse_lct(adpt_hba* pHba)
continue;
}
if( pHba->channel[bus_no].device[scsi_id] == NULL){
pDev = kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
if(pDev == NULL) {
return -ENOMEM;
}
pHba->channel[bus_no].device[scsi_id] = pDev;
memset(pDev,0,sizeof(struct adpt_device));
} else {
for( pDev = pHba->channel[bus_no].device[scsi_id];
pDev->next_lun; pDev = pDev->next_lun){
}
pDev->next_lun = kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
if(pDev->next_lun == NULL) {
return -ENOMEM;
}
memset(pDev->next_lun,0,sizeof(struct adpt_device));
pDev = pDev->next_lun;
}
pDev->tid = tid;
@ -1667,12 +1664,11 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
reply_size = REPLY_FRAME_SIZE;
}
reply_size *= 4;
reply = kmalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
if(reply == NULL) {
printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
return -ENOMEM;
}
memset(reply,0,REPLY_FRAME_SIZE*4);
sg_offset = (msg[0]>>4)&0xf;
msg[2] = 0x40000000; // IOCTL context
msg[3] = (u32)reply;
@ -2444,7 +2440,7 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
}
pDev = pHba->channel[bus_no].device[scsi_id];
if( pDev == NULL){
pDev = kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
if(pDev == NULL) {
return -ENOMEM;
}
@ -2453,12 +2449,11 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
while (pDev->next_lun) {
pDev = pDev->next_lun;
}
pDev = pDev->next_lun = kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
pDev = pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
if(pDev == NULL) {
return -ENOMEM;
}
}
memset(pDev,0,sizeof(struct adpt_device));
pDev->tid = d->lct_data.tid;
pDev->scsi_channel = bus_no;
pDev->scsi_id = scsi_id;


@ -92,6 +92,7 @@ static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
static unsigned int ipr_dual_ioa_raid = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
@ -158,6 +159,8 @@ module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
module_param_named(debug, ipr_debug, int, 0);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
@ -206,6 +209,8 @@ struct ipr_error_table_t ipr_error_table[] = {
"8009: Impending cache battery pack failure"},
{0x02040400, 0, 0,
"34FF: Disk device format in progress"},
{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
"9070: IOA requested reset"},
{0x023F0000, 0, 0,
"Synchronization required"},
{0x024E0000, 0, 0,
@ -950,6 +955,53 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
}
}
/**
* strip_and_pad_whitespace - Strip and pad trailing whitespace.
* @i: index into buffer
* @buf: string to modify
*
* This function will strip all trailing whitespace, pad the end
* of the string with a single space, and NULL terminate the string.
*
* Return value:
* new length of string
**/
static int strip_and_pad_whitespace(int i, char *buf)
{
while (i && buf[i] == ' ')
i--;
buf[i+1] = ' ';
buf[i+2] = '\0';
return i + 2;
}
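As a worked example: with buf = "IBM     " (a space-padded 8-byte vendor field) and i = 7, the loop backs up to the 'M' at index 2, a single space is written at index 3 and a terminator at index 4, and the function returns 4, the offset at which the caller copies the next field. A small userspace check of that behavior, using a copy of the function above:

#include <assert.h>
#include <string.h>

static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i + 1] = ' ';
	buf[i + 2] = '\0';
	return i + 2;
}

int main(void)
{
	char buf[32] = "IBM     ";	/* 8-byte, space-padded field */
	int next = strip_and_pad_whitespace(7, buf);

	assert(next == 4);
	assert(strcmp(buf, "IBM ") == 0);	/* next field lands at buf[4] */
	return 0;
}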
/**
* ipr_log_vpd_compact - Log the passed extended VPD compactly.
* @prefix: string to print at start of printk
* @hostrcb: hostrcb pointer
* @vpd: vendor/product id/sn struct
*
* Return value:
* none
**/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
struct ipr_vpd *vpd)
{
char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
int i = 0;
memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}
/**
* ipr_log_vpd - Log the passed VPD to the error log.
* @vpd: vendor/product id/sn struct
@ -973,6 +1025,23 @@ static void ipr_log_vpd(struct ipr_vpd *vpd)
ipr_err(" Serial Number: %s\n", buffer);
}
/**
* ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
* @prefix: string to print at start of printk
* @hostrcb: hostrcb pointer
* @vpd: vendor/product id/sn/wwn struct
*
* Return value:
* none
**/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
struct ipr_ext_vpd *vpd)
{
ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}
/**
* ipr_log_ext_vpd - Log the passed extended VPD to the error log.
* @vpd: vendor/product id/sn/wwn struct
@ -1287,10 +1356,11 @@ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
error = &hostrcb->hcam.u.error.u.type_17_error;
error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
strstrip(error->failure_reason);
ipr_err("%s\n", error->failure_reason);
ipr_err("Remote Adapter VPD:\n");
ipr_log_ext_vpd(&error->vpd);
ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
be32_to_cpu(hostrcb->hcam.u.error.prc));
ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ipr_log_hex_data(ioa_cfg, error->data,
be32_to_cpu(hostrcb->hcam.length) -
(offsetof(struct ipr_hostrcb_error, u) +
@ -1312,10 +1382,11 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
error = &hostrcb->hcam.u.error.u.type_07_error;
error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
strstrip(error->failure_reason);
ipr_err("%s\n", error->failure_reason);
ipr_err("Remote Adapter VPD:\n");
ipr_log_vpd(&error->vpd);
ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
be32_to_cpu(hostrcb->hcam.u.error.prc));
ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ipr_log_hex_data(ioa_cfg, error->data,
be32_to_cpu(hostrcb->hcam.length) -
(offsetof(struct ipr_hostrcb_error, u) +
@ -1672,12 +1743,15 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
list_del(&hostrcb->queue);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
if (!ioasc) {
ipr_handle_log_data(ioa_cfg, hostrcb);
if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
dev_err(&ioa_cfg->pdev->dev,
"Host RCB failed with IOASC: 0x%08X\n", ioasc);
@ -2635,8 +2709,13 @@ static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
while(ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
}
ioa_cfg->errors_logged = 0;
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
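The loop added here, and repeated below in ipr_update_ioa_ucode(), __ipr_remove(), and ipr_shutdown(), is the usual drop-and-retest idiom: wait_event() can sleep, so the spinlock must be released around it, and the condition is re-checked after the lock is re-taken because a new reset may have started in between. A kernel-style sketch of the bare pattern, with illustrative names:

#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(lock);
static DECLARE_WAIT_QUEUE_HEAD(wq);
static int in_reset_reload;

static void wait_until_idle(void)
{
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);
	while (in_reset_reload) {
		/* wait_event() may sleep, so drop the lock first ... */
		spin_unlock_irqrestore(&lock, flags);
		wait_event(wq, !in_reset_reload);
		/* ... then re-take it; the loop re-tests the condition */
		spin_lock_irqsave(&lock, flags);
	}
	/* idle here, with the lock held */
	spin_unlock_irqrestore(&lock, flags);
}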
@ -2958,6 +3037,11 @@ static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
unsigned long lock_flags;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
while(ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
}
if (ioa_cfg->ucode_sglist) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@ -4656,18 +4740,19 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
if (!res) {
ipr_scsi_eh_done(ipr_cmd);
return;
}
if (!ipr_is_gscsi(res))
if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
ipr_gen_sense(ipr_cmd);
ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
switch (ioasc & IPR_IOASC_IOASC_MASK) {
switch (masked_ioasc) {
case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
if (ipr_is_naca_model(res))
scsi_cmd->result |= (DID_ABORT << 16);
@ -5363,6 +5448,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}
scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
ioa_cfg->reset_retries = 0;
@ -5798,6 +5884,94 @@ static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
return IPR_RC_JOB_RETURN;
}
/**
* ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
* @ipr_cmd: ipr command struct
*
* This function enables dual IOA RAID support if possible.
*
* Return value:
* IPR_RC_JOB_RETURN
**/
static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
struct ipr_mode_page24 *mode_page;
int length;
ENTER;
mode_page = ipr_get_mode_page(mode_pages, 0x24,
sizeof(struct ipr_mode_page24));
if (mode_page)
mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
length = mode_pages->hdr.length + 1;
mode_pages->hdr.length = 0;
ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
length);
ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
LEAVE;
return IPR_RC_JOB_RETURN;
}
/**
* ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
* @ipr_cmd: ipr command struct
*
* This function handles the failure of a Mode Sense to the IOAFP.
* Some adapters do not handle all mode pages.
*
* Return value:
* IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
**/
static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
{
u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
return IPR_RC_JOB_CONTINUE;
}
return ipr_reset_cmd_failed(ipr_cmd);
}
/**
* ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
* @ipr_cmd: ipr command struct
*
* This function sends a mode sense to the IOA to retrieve
* the IOA Advanced Function Control mode page.
*
* Return value:
* IPR_RC_JOB_RETURN
**/
static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ENTER;
ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
0x24, ioa_cfg->vpd_cbs_dma +
offsetof(struct ipr_misc_cbs, mode_pages),
sizeof(struct ipr_mode_pages));
ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
LEAVE;
return IPR_RC_JOB_RETURN;
}
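These functions are steps in ipr's reset job chain: a step returns IPR_RC_JOB_RETURN after launching an asynchronous command (whose completion re-enters the chain), or IPR_RC_JOB_CONTINUE to have the next ipr_cmd->job_step run at once, and job_step_failed lets a step recover from an error, here by skipping mode page 0x24 on adapters that reject it. A stripped-down userspace model of that dispatch style, not the driver's actual loop:

#include <stdio.h>

enum rc { JOB_CONTINUE, JOB_RETURN };

struct cmd {
	enum rc (*job_step)(struct cmd *);
};

static enum rc step_b(struct cmd *c)
{
	printf("step B (last)\n");
	return JOB_RETURN;		/* would fire an async command */
}

static enum rc step_a(struct cmd *c)
{
	printf("step A\n");
	c->job_step = step_b;		/* chain to the next step */
	return JOB_CONTINUE;		/* run it immediately */
}

/* dispatch: keep running steps until one goes asynchronous */
static void run_job(struct cmd *c)
{
	while (c->job_step(c) == JOB_CONTINUE)
		;
}

int main(void)
{
	struct cmd c = { .job_step = step_a };

	run_job(&c);
	return 0;
}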
/**
* ipr_init_res_table - Initialize the resource table
* @ipr_cmd: ipr command struct
@ -5866,7 +6040,10 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
}
}
ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
else
ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
LEAVE;
return IPR_RC_JOB_CONTINUE;
@ -5888,8 +6065,11 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
ENTER;
if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
ioa_cfg->dual_raid = 1;
dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
ucode_vpd->major_release, ucode_vpd->card_type,
ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
@ -5972,6 +6152,37 @@ static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
return 0;
}
/**
* ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
* @ipr_cmd: ipr command struct
*
* This function sends a Page 0xD0 inquiry to the adapter
* to retrieve adapter capabilities.
*
* Return value:
* IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
**/
static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
ENTER;
ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
memset(cap, 0, sizeof(*cap));
if (ipr_inquiry_page_supported(page0, 0xD0)) {
ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
sizeof(struct ipr_inquiry_cap));
return IPR_RC_JOB_RETURN;
}
LEAVE;
return IPR_RC_JOB_CONTINUE;
}
/**
* ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
* @ipr_cmd: ipr command struct
@ -5992,7 +6203,7 @@ static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
if (!ipr_inquiry_page_supported(page0, 1))
ioa_cfg->cache_state = CACHE_NONE;
ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
ipr_ioafp_inquiry(ipr_cmd, 1, 3,
ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
@ -6278,6 +6489,7 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
struct ipr_hostrcb *hostrcb;
struct ipr_uc_sdt sdt;
int rc, length;
u32 ioasc;
mailbox = readl(ioa_cfg->ioa_mailbox);
@ -6310,9 +6522,13 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
(__be32 *)&hostrcb->hcam,
min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
if (!rc)
if (!rc) {
ipr_handle_log_data(ioa_cfg, hostrcb);
else
ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
ioa_cfg->sdt_state == GET_DUMP)
ioa_cfg->sdt_state = WAIT_FOR_DUMP;
} else
ipr_unit_check_no_data(ioa_cfg);
list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
@ -6424,6 +6640,48 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
return rc;
}
/**
* ipr_reset_slot_reset_done - Clear PCI reset to the adapter
* @ipr_cmd: ipr command struct
*
* Description: This clears PCI reset to the adapter and delays two seconds.
*
* Return value:
* IPR_RC_JOB_RETURN
**/
static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
{
ENTER;
pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
ipr_cmd->job_step = ipr_reset_bist_done;
ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
LEAVE;
return IPR_RC_JOB_RETURN;
}
/**
* ipr_reset_slot_reset - Reset the PCI slot of the adapter.
* @ipr_cmd: ipr command struct
*
* Description: This asserts PCI reset to the adapter.
*
* Return value:
* IPR_RC_JOB_RETURN
**/
static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct pci_dev *pdev = ioa_cfg->pdev;
ENTER;
pci_block_user_cfg_access(pdev);
pci_set_pcie_reset_state(pdev, pcie_warm_reset);
ipr_cmd->job_step = ipr_reset_slot_reset_done;
ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
LEAVE;
return IPR_RC_JOB_RETURN;
}
/**
* ipr_reset_allowed - Query whether or not IOA can be reset
* @ioa_cfg: ioa config struct
@ -6463,7 +6721,7 @@ static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
} else {
ipr_cmd->job_step = ipr_reset_start_bist;
ipr_cmd->job_step = ioa_cfg->reset;
rc = IPR_RC_JOB_CONTINUE;
}
@ -6496,7 +6754,7 @@ static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
} else {
ipr_cmd->job_step = ipr_reset_start_bist;
ipr_cmd->job_step = ioa_cfg->reset;
}
ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
@ -6591,12 +6849,14 @@ static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
if (shutdown_type == IPR_SHUTDOWN_ABBREV)
timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
if (shutdown_type == IPR_SHUTDOWN_NORMAL)
timeout = IPR_SHUTDOWN_TIMEOUT;
else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
timeout = IPR_INTERNAL_TIMEOUT;
else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
else
timeout = IPR_SHUTDOWN_TIMEOUT;
timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
@ -6776,8 +7036,11 @@ static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
IPR_SHUTDOWN_NONE);
if (ioa_cfg->needs_warm_reset)
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
else
_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
IPR_SHUTDOWN_NONE);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
return PCI_ERS_RESULT_RECOVERED;
}
@ -7226,7 +7489,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
unsigned long ipr_regs_pci;
void __iomem *ipr_regs;
int rc = PCIBIOS_SUCCESSFUL;
volatile u32 mask, uproc;
volatile u32 mask, uproc, interrupts;
ENTER;
@ -7265,6 +7528,14 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
else
ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
rc = pci_read_config_byte(pdev, PCI_REVISION_ID, &ioa_cfg->revid);
if (rc != PCIBIOS_SUCCESSFUL) {
dev_err(&pdev->dev, "Failed to read PCI revision ID\n");
rc = -EIO;
goto out_scsi_host_put;
}
ipr_regs_pci = pci_resource_start(pdev, 0);
rc = pci_request_regions(pdev, IPR_NAME);
@ -7333,9 +7604,14 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
* the card is in an unknown state and needs a hard reset
*/
mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
ioa_cfg->needs_hard_reset = 1;
if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
ioa_cfg->needs_hard_reset = 1;
if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
ioa_cfg->ioa_unit_checked = 1;
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
@ -7346,6 +7622,13 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
goto cleanup_nolog;
}
if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
(dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
ioa_cfg->needs_warm_reset = 1;
ioa_cfg->reset = ipr_reset_slot_reset;
} else
ioa_cfg->reset = ipr_reset_start_bist;
spin_lock(&ipr_driver_lock);
list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
spin_unlock(&ipr_driver_lock);
@ -7428,6 +7711,12 @@ static void __ipr_remove(struct pci_dev *pdev)
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
while(ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
}
ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
@ -7551,6 +7840,12 @@ static void ipr_shutdown(struct pci_dev *pdev)
unsigned long lock_flags = 0;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
while(ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
}
ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
@ -7577,19 +7872,22 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
IPR_USE_LONG_TRANSOP_TIMEOUT },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
IPR_USE_LONG_TRANSOP_TIMEOUT },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
IPR_USE_LONG_TRANSOP_TIMEOUT},
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
IPR_USE_LONG_TRANSOP_TIMEOUT },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0, 0 },
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
IPR_USE_LONG_TRANSOP_TIMEOUT },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
IPR_USE_LONG_TRANSOP_TIMEOUT },
@ -7597,7 +7895,7 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
IPR_USE_LONG_TRANSOP_TIMEOUT },
IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
@ -7627,6 +7925,7 @@ static struct pci_driver ipr_driver = {
.remove = ipr_remove,
.shutdown = ipr_shutdown,
.err_handler = &ipr_err_handler,
.dynids.use_driver_data = 1
};
/**


@ -37,8 +37,8 @@
/*
* Literals
*/
#define IPR_DRIVER_VERSION "2.3.2"
#define IPR_DRIVER_DATE "(March 23, 2007)"
#define IPR_DRIVER_VERSION "2.4.1"
#define IPR_DRIVER_DATE "(April 24, 2007)"
/*
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@ -91,6 +91,7 @@
* IOASCs
*/
#define IPR_IOASC_NR_INIT_CMD_REQUIRED 0x02040200
#define IPR_IOASC_NR_IOA_RESET_REQUIRED 0x02048000
#define IPR_IOASC_SYNC_REQUIRED 0x023f0000
#define IPR_IOASC_MED_DO_NOT_REALLOC 0x03110C00
#define IPR_IOASC_HW_SEL_TIMEOUT 0x04050000
@ -111,6 +112,7 @@
/* Driver data flags */
#define IPR_USE_LONG_TRANSOP_TIMEOUT 0x00000001
#define IPR_USE_PCI_WARM_RESET 0x00000002
#define IPR_DEFAULT_MAX_ERROR_DUMP 984
#define IPR_NUM_LOG_HCAMS 2
@ -179,6 +181,7 @@
#define IPR_SHUTDOWN_TIMEOUT (ipr_fastfail ? 60 * HZ : 10 * 60 * HZ)
#define IPR_VSET_RW_TIMEOUT (ipr_fastfail ? 30 * HZ : 2 * 60 * HZ)
#define IPR_ABBREV_SHUTDOWN_TIMEOUT (10 * HZ)
#define IPR_DUAL_IOA_ABBR_SHUTDOWN_TO (2 * 60 * HZ)
#define IPR_DEVICE_RESET_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
#define IPR_CANCEL_ALL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
#define IPR_ABORT_TASK_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
@ -191,6 +194,7 @@
#define IPR_WAIT_FOR_RESET_TIMEOUT (2 * HZ)
#define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10)
#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ)
#define IPR_PCI_RESET_TIMEOUT (HZ / 2)
#define IPR_DUMP_TIMEOUT (15 * HZ)
/*
@ -602,6 +606,12 @@ struct ipr_mode_page28 {
struct ipr_dev_bus_entry bus[0];
}__attribute__((packed));
struct ipr_mode_page24 {
struct ipr_mode_page_hdr hdr;
u8 flags;
#define IPR_ENABLE_DUAL_IOA_AF 0x80
}__attribute__((packed));
struct ipr_ioa_vpd {
struct ipr_std_inq_data std_inq_data;
u8 ascii_part_num[12];
@ -624,6 +634,19 @@ struct ipr_inquiry_page3 {
u8 patch_number[4];
}__attribute__((packed));
struct ipr_inquiry_cap {
u8 peri_qual_dev_type;
u8 page_code;
u8 reserved1;
u8 page_length;
u8 ascii_len;
u8 reserved2;
u8 sis_version[2];
u8 cap;
#define IPR_CAP_DUAL_IOA_RAID 0x80
u8 reserved3[15];
}__attribute__((packed));
#define IPR_INQUIRY_PAGE0_ENTRIES 20
struct ipr_inquiry_page0 {
u8 peri_qual_dev_type;
@ -962,6 +985,7 @@ struct ipr_misc_cbs {
struct ipr_ioa_vpd ioa_vpd;
struct ipr_inquiry_page0 page0_data;
struct ipr_inquiry_page3 page3_data;
struct ipr_inquiry_cap cap;
struct ipr_mode_pages mode_pages;
struct ipr_supported_device supp_dev;
};
@ -1068,6 +1092,10 @@ struct ipr_ioa_cfg {
u8 allow_cmds:1;
u8 allow_ml_add_del:1;
u8 needs_hard_reset:1;
u8 dual_raid:1;
u8 needs_warm_reset:1;
u8 revid;
enum ipr_cache_state cache_state;
u16 type; /* CCIN of the card */
@ -1161,6 +1189,7 @@ struct ipr_ioa_cfg {
struct pci_pool *ipr_cmd_pool;
struct ipr_cmnd *reset_cmd;
int (*reset) (struct ipr_cmnd *);
struct ata_host ata_host;
char ipr_cmd_label[8];


@ -23,6 +23,8 @@
*
*/
#include <linux/kthread.h>
#include "sas_internal.h"
#include <scsi/scsi_host.h>
@ -184,7 +186,7 @@ static int sas_queue_up(struct sas_task *task)
list_add_tail(&task->list, &core->task_queue);
core->task_queue_size += 1;
spin_unlock_irqrestore(&core->task_queue_lock, flags);
up(&core->queue_thread_sema);
wake_up_process(core->queue_thread);
return 0;
}
@ -819,7 +821,7 @@ static void sas_queue(struct sas_ha_struct *sas_ha)
struct sas_internal *i = to_sas_internal(core->shost->transportt);
spin_lock_irqsave(&core->task_queue_lock, flags);
while (!core->queue_thread_kill &&
while (!kthread_should_stop() &&
!list_empty(&core->task_queue)) {
can_queue = sas_ha->lldd_queue_size - core->task_queue_size;
@ -858,8 +860,6 @@ static void sas_queue(struct sas_ha_struct *sas_ha)
spin_unlock_irqrestore(&core->task_queue_lock, flags);
}
static DECLARE_COMPLETION(queue_th_comp);
/**
* sas_queue_thread -- The Task Collector thread
* @_sas_ha: pointer to struct sas_ha
@ -867,40 +867,33 @@ static DECLARE_COMPLETION(queue_th_comp);
static int sas_queue_thread(void *_sas_ha)
{
struct sas_ha_struct *sas_ha = _sas_ha;
struct scsi_core *core = &sas_ha->core;
daemonize("sas_queue_%d", core->shost->host_no);
current->flags |= PF_NOFREEZE;
complete(&queue_th_comp);
while (1) {
down_interruptible(&core->queue_thread_sema);
set_current_state(TASK_INTERRUPTIBLE);
schedule();
sas_queue(sas_ha);
if (core->queue_thread_kill)
if (kthread_should_stop())
break;
}
complete(&queue_th_comp);
return 0;
}
int sas_init_queue(struct sas_ha_struct *sas_ha)
{
int res;
struct scsi_core *core = &sas_ha->core;
spin_lock_init(&core->task_queue_lock);
core->task_queue_size = 0;
INIT_LIST_HEAD(&core->task_queue);
init_MUTEX_LOCKED(&core->queue_thread_sema);
res = kernel_thread(sas_queue_thread, sas_ha, 0);
if (res >= 0)
wait_for_completion(&queue_th_comp);
return res < 0 ? res : 0;
core->queue_thread = kthread_run(sas_queue_thread, sas_ha,
"sas_queue_%d", core->shost->host_no);
if (IS_ERR(core->queue_thread))
return PTR_ERR(core->queue_thread);
return 0;
}
void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
@ -909,10 +902,7 @@ void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
struct scsi_core *core = &sas_ha->core;
struct sas_task *task, *n;
init_completion(&queue_th_comp);
core->queue_thread_kill = 1;
up(&core->queue_thread_sema);
wait_for_completion(&queue_th_comp);
kthread_stop(core->queue_thread);
if (!list_empty(&core->task_queue))
SAS_DPRINTK("HA: %llx: scsi core task queue is NOT empty!?\n",
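The libsas change above replaces a hand-rolled thread lifecycle (kernel_thread() plus daemonize(), a semaphore for wakeups, a kill flag, and a completion for teardown) with the kthread API, which provides creation, wakeup, and synchronous stop in one interface. The resulting shape, as an illustrative kernel-style sketch:

#include <linux/kthread.h>
#include <linux/sched.h>

static int my_queue_thread(void *data)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!kthread_should_stop())
			schedule();	/* sleep until woken */
		__set_current_state(TASK_RUNNING);
		/* ... drain the task queue here ... */
	}
	return 0;
}

/*
 * creation:  task = kthread_run(my_queue_thread, ha, "sas_queue_%d", no);
 *            (check IS_ERR(task) for failure)
 * producer:  wake_up_process(task);
 * teardown:  kthread_stop(task) sets the stop flag, wakes the thread,
 *            and waits for it to exit, so no completion is needed.
 */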


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* Copyright (C) 2004-2007 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -27,10 +27,6 @@ struct lpfc_sli2_slim;
requests */
#define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact
the NameServer before giving up. */
#define LPFC_DFT_HBA_Q_DEPTH 2048 /* max cmds per hba */
#define LPFC_LC_HBA_Q_DEPTH 1024 /* max cmds per low cost hba */
#define LPFC_LP101_HBA_Q_DEPTH 128 /* max cmds per low cost hba */
#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */
#define LPFC_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
@ -244,28 +240,23 @@ struct lpfc_hba {
#define FC_FABRIC 0x100 /* We are fabric attached */
#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */
#define FC_RSCN_DISCOVERY 0x400 /* Authenticate all devices after RSCN*/
#define FC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
#define FC_LOADING 0x1000 /* HBA in process of loading drvr */
#define FC_UNLOADING 0x2000 /* HBA in process of unloading drvr */
#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */
#define FC_LOOPBACK_MODE 0x40000 /* NPort is in Loopback mode */
/* This flag is set while issuing */
/* INIT_LINK mailbox command */
#define FC_IGNORE_ERATT 0x80000 /* intr handler should ignore ERATT */
uint32_t fc_topology; /* link topology, from LINK INIT */
struct lpfc_stats fc_stat;
/* These are the head/tail pointers for the bind, plogi, adisc, unmap,
* and map lists. Their counters are immediately following.
*/
struct list_head fc_plogi_list;
struct list_head fc_adisc_list;
struct list_head fc_reglogin_list;
struct list_head fc_prli_list;
struct list_head fc_nlpunmap_list;
struct list_head fc_nlpmap_list;
struct list_head fc_npr_list;
struct list_head fc_unused_list;
struct list_head fc_nodes;
/* Keep counters for the number of entries in each list. */
uint16_t fc_plogi_cnt;
@ -387,13 +378,17 @@ struct lpfc_hba {
mempool_t *mbox_mem_pool;
mempool_t *nlp_mem_pool;
struct list_head freebufList;
struct list_head ctrspbuflist;
struct list_head rnidrspbuflist;
struct fc_host_statistics link_stats;
};
static inline void
lpfc_set_loopback_flag(struct lpfc_hba *phba) {
if (phba->cfg_topology == FLAGS_LOCAL_LB)
phba->fc_flag |= FC_LOOPBACK_MODE;
else
phba->fc_flag &= ~FC_LOOPBACK_MODE;
}
struct rnidrsp {
void *buf;


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* Copyright (C) 2004-2007 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -20,6 +20,7 @@
*******************************************************************/
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@ -213,6 +214,7 @@ lpfc_issue_lip(struct Scsi_Host *host)
int mbxstatus = MBXERR_ERROR;
if ((phba->fc_flag & FC_OFFLINE_MODE) ||
(phba->fc_flag & FC_BLOCK_MGMT_IO) ||
(phba->hba_state != LPFC_HBA_READY))
return -EPERM;
@ -235,6 +237,7 @@ lpfc_issue_lip(struct Scsi_Host *host)
phba->fc_ratov * 2);
}
lpfc_set_loopback_flag(phba);
if (mbxstatus == MBX_TIMEOUT)
pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
else
@ -246,19 +249,62 @@ lpfc_issue_lip(struct Scsi_Host *host)
return 0;
}
static int
lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
{
struct completion online_compl;
struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
int status = 0;
int cnt = 0;
int i;
init_completion(&online_compl);
lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_OFFLINE_PREP);
wait_for_completion(&online_compl);
if (status != 0)
return -EIO;
psli = &phba->sli;
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
/* The linkdown event takes 30 seconds to timeout. */
while (pring->txcmplq_cnt) {
msleep(10);
if (cnt++ > 3000) {
lpfc_printf_log(phba,
KERN_WARNING, LOG_INIT,
"%d:0466 Outstanding IO when "
"bringing Adapter offline\n",
phba->brd_no);
break;
}
}
}
init_completion(&online_compl);
lpfc_workq_post_event(phba, &status, &online_compl, type);
wait_for_completion(&online_compl);
if (status != 0)
return -EIO;
return 0;
}
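lpfc_do_offline() factors out the sequence previously duplicated by the reset, board_mode and soft_wwpn paths: post LPFC_EVT_OFFLINE_PREP, poll each ring's txcmplq until it drains (10 ms per poll, capped at 3000 polls to match the 30-second linkdown timeout), then post the requested offline event. Note it polls with msleep(), which sleeps, where the old lpfc_offline() used the busy-waiting mdelay(). The bounded-drain idiom in isolation, as a sketch with an assumed opaque queue type and queue_len() helper:

#include <linux/delay.h>
#include <linux/errno.h>

struct my_queue;				/* hypothetical; stands in for a ring */
extern int queue_len(struct my_queue *q);	/* assumed helper */

/* Sleep-poll until the queue drains; give up after ~30 s. */
static int wait_for_drain(struct my_queue *q)
{
	int cnt = 0;

	while (queue_len(q)) {
		msleep(10);		/* sleeps, unlike mdelay() */
		if (cnt++ > 3000)
			return -ETIMEDOUT;
	}
	return 0;
}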
static int
lpfc_selective_reset(struct lpfc_hba *phba)
{
struct completion online_compl;
int status = 0;
init_completion(&online_compl);
lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_OFFLINE);
wait_for_completion(&online_compl);
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
if (status != 0)
return -EIO;
return status;
init_completion(&online_compl);
lpfc_workq_post_event(phba, &status, &online_compl,
@ -324,23 +370,19 @@ lpfc_board_mode_store(struct class_device *cdev, const char *buf, size_t count)
init_completion(&online_compl);
if(strncmp(buf, "online", sizeof("online") - 1) == 0)
if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_ONLINE);
else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_OFFLINE);
wait_for_completion(&online_compl);
} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_WARM_START);
else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_KILL);
status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
status = lpfc_do_offline(phba, LPFC_EVT_KILL);
else
return -EINVAL;
wait_for_completion(&online_compl);
if (!status)
return strlen(buf);
else
@ -645,9 +687,7 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
dev_printk(KERN_NOTICE, &phba->pcidev->dev,
"lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
init_completion(&online_compl);
lpfc_workq_post_event(phba, &stat1, &online_compl, LPFC_EVT_OFFLINE);
wait_for_completion(&online_compl);
stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
if (stat1)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0463 lpfc_soft_wwpn attribute set failed to reinit "
@ -789,6 +829,18 @@ lpfc_nodev_tmo_init(struct lpfc_hba *phba, int val)
return -EINVAL;
}
static void
lpfc_update_rport_devloss_tmo(struct lpfc_hba *phba)
{
struct lpfc_nodelist *ndlp;
spin_lock_irq(phba->host->host_lock);
list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp)
if (ndlp->rport)
ndlp->rport->dev_loss_tmo = phba->cfg_devloss_tmo;
spin_unlock_irq(phba->host->host_lock);
}
static int
lpfc_nodev_tmo_set(struct lpfc_hba *phba, int val)
{
@ -804,6 +856,7 @@ lpfc_nodev_tmo_set(struct lpfc_hba *phba, int val)
if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
phba->cfg_nodev_tmo = val;
phba->cfg_devloss_tmo = val;
lpfc_update_rport_devloss_tmo(phba);
return 0;
}
@ -839,6 +892,7 @@ lpfc_devloss_tmo_set(struct lpfc_hba *phba, int val)
phba->cfg_nodev_tmo = val;
phba->cfg_devloss_tmo = val;
phba->dev_loss_tmo_changed = 1;
lpfc_update_rport_devloss_tmo(phba);
return 0;
}
@ -931,9 +985,10 @@ LPFC_ATTR_RW(topology, 0, 0, 6, "Select Fibre Channel topology");
# 1 = 1 Gigabaud
# 2 = 2 Gigabaud
# 4 = 4 Gigabaud
# Value range is [0,4]. Default value is 0.
# 8 = 8 Gigabaud
# Value range is [0,8]. Default value is 0.
*/
LPFC_ATTR_R(link_speed, 0, 0, 4, "Select link speed");
LPFC_ATTR_R(link_speed, 0, 0, 8, "Select link speed");
/*
# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
@ -958,7 +1013,7 @@ LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
/*
# lpfc_cr_delay & lpfc_cr_count: Default values for I/O coalescing
# cr_delay (msec) or cr_count outstanding commands. cr_delay can take
# value [0,63]. cr_count can take value [0,255]. Default value of cr_delay
# value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
# is 0. Default value of cr_count is 1. The cr_count feature is disabled if
# cr_delay is set to 0.
*/
@ -1227,11 +1282,11 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
int rc;
if (off > sizeof(MAILBOX_t))
if (off > MAILBOX_CMD_SIZE)
return -ERANGE;
if ((count + off) > sizeof(MAILBOX_t))
count = sizeof(MAILBOX_t) - off;
if ((count + off) > MAILBOX_CMD_SIZE)
count = MAILBOX_CMD_SIZE - off;
if (off % 4 || count % 4 || (unsigned long)buf % 4)
return -EINVAL;
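With the attribute size now expressed as MAILBOX_CMD_SIZE instead of sizeof(MAILBOX_t), the read and write paths clamp the caller's (off, count) window the same way every binary sysfs attribute must. The clamp in isolation, as a sketch (attr_size standing in for MAILBOX_CMD_SIZE):

#include <linux/errno.h>
#include <linux/types.h>

/* Clamp an (off, count) window to an attr_size-byte attribute;
 * returns how many bytes may actually be copied, or -ERANGE. */
static ssize_t clamp_window(loff_t off, size_t count, size_t attr_size)
{
	if (off > attr_size)
		return -ERANGE;
	if (off + count > attr_size)
		count = attr_size - off;
	return count;
}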
@ -1307,6 +1362,12 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
return -EPERM;
}
if (phba->fc_flag & FC_BLOCK_MGMT_IO) {
sysfs_mbox_idle(phba);
spin_unlock_irq(host->host_lock);
return -EAGAIN;
}
if ((phba->fc_flag & FC_OFFLINE_MODE) ||
(!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){
@ -1326,6 +1387,11 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
}
if (rc != MBX_SUCCESS) {
if (rc == MBX_TIMEOUT) {
phba->sysfs_mbox.mbox->mbox_cmpl =
lpfc_sli_def_mbox_cmpl;
phba->sysfs_mbox.mbox = NULL;
}
sysfs_mbox_idle(phba);
spin_unlock_irq(host->host_lock);
return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
@ -1344,7 +1410,7 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
phba->sysfs_mbox.offset = off + count;
if (phba->sysfs_mbox.offset == sizeof(MAILBOX_t))
if (phba->sysfs_mbox.offset == MAILBOX_CMD_SIZE)
sysfs_mbox_idle(phba);
spin_unlock_irq(phba->host->host_lock);
@ -1358,7 +1424,7 @@ static struct bin_attribute sysfs_mbox_attr = {
.mode = S_IRUSR | S_IWUSR,
.owner = THIS_MODULE,
},
.size = sizeof(MAILBOX_t),
.size = MAILBOX_CMD_SIZE,
.read = sysfs_mbox_read,
.write = sysfs_mbox_write,
};
@ -1494,6 +1560,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
case LA_4GHZ_LINK:
fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
break;
case LA_8GHZ_LINK:
fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
break;
default:
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
@ -1546,6 +1615,9 @@ lpfc_get_stats(struct Scsi_Host *shost)
unsigned long seconds;
int rc = 0;
if (phba->fc_flag & FC_BLOCK_MGMT_IO)
return NULL;
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq)
return NULL;
@ -1631,6 +1703,8 @@ lpfc_get_stats(struct Scsi_Host *shost)
else
hs->seconds_since_last_reset = seconds - psli->stats_start;
mempool_free(pmboxq, phba->mbox_mem_pool);
return hs;
}
@ -1644,6 +1718,9 @@ lpfc_reset_stats(struct Scsi_Host *shost)
MAILBOX_t *pmb;
int rc = 0;
if (phba->fc_flag & FC_BLOCK_MGMT_IO)
return;
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq)
return;
@ -1699,6 +1776,8 @@ lpfc_reset_stats(struct Scsi_Host *shost)
psli->stats_start = get_seconds();
mempool_free(pmboxq, phba->mbox_mem_pool);
return;
}
@ -1706,67 +1785,51 @@ lpfc_reset_stats(struct Scsi_Host *shost)
* The LPFC driver treats linkdown handling as target loss events so there
* are no sysfs handlers for link_down_tmo.
*/
static void
lpfc_get_starget_port_id(struct scsi_target *starget)
static struct lpfc_nodelist *
lpfc_get_node_by_target(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata;
uint32_t did = -1;
struct lpfc_nodelist *ndlp = NULL;
struct lpfc_nodelist *ndlp;
spin_lock_irq(shost->host_lock);
/* Search the mapped list for this target ID */
list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
if (starget->id == ndlp->nlp_sid) {
did = ndlp->nlp_DID;
break;
/* Search for this, mapped, target ID */
list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
starget->id == ndlp->nlp_sid) {
spin_unlock_irq(shost->host_lock);
return ndlp;
}
}
spin_unlock_irq(shost->host_lock);
return NULL;
}
fc_starget_port_id(starget) = did;
static void
lpfc_get_starget_port_id(struct scsi_target *starget)
{
struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1;
}
static void
lpfc_get_starget_node_name(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata;
u64 node_name = 0;
struct lpfc_nodelist *ndlp = NULL;
struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
spin_lock_irq(shost->host_lock);
/* Search the mapped list for this target ID */
list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
if (starget->id == ndlp->nlp_sid) {
node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
break;
}
}
spin_unlock_irq(shost->host_lock);
fc_starget_node_name(starget) = node_name;
fc_starget_node_name(starget) =
ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0;
}
static void
lpfc_get_starget_port_name(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata;
u64 port_name = 0;
struct lpfc_nodelist *ndlp = NULL;
struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
spin_lock_irq(shost->host_lock);
/* Search the mapped list for this target ID */
list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
if (starget->id == ndlp->nlp_sid) {
port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
break;
}
}
spin_unlock_irq(shost->host_lock);
fc_starget_port_name(starget) = port_name;
fc_starget_port_name(starget) =
ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0;
}
static void
@ -1895,25 +1958,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
sizeof(struct fcp_rsp) +
(phba->cfg_sg_seg_cnt * sizeof(struct ulp_bde64));
switch (phba->pcidev->device) {
case PCI_DEVICE_ID_LP101:
case PCI_DEVICE_ID_BSMB:
case PCI_DEVICE_ID_ZSMB:
phba->cfg_hba_queue_depth = LPFC_LP101_HBA_Q_DEPTH;
break;
case PCI_DEVICE_ID_RFLY:
case PCI_DEVICE_ID_PFLY:
case PCI_DEVICE_ID_BMID:
case PCI_DEVICE_ID_ZMID:
case PCI_DEVICE_ID_TFLY:
phba->cfg_hba_queue_depth = LPFC_LC_HBA_Q_DEPTH;
break;
default:
phba->cfg_hba_queue_depth = LPFC_DFT_HBA_Q_DEPTH;
}
if (phba->cfg_hba_queue_depth > lpfc_hba_queue_depth)
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
return;
}


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* Copyright (C) 2004-2007 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -18,6 +18,8 @@
* included with this package. *
*******************************************************************/
typedef int (*node_filter)(struct lpfc_nodelist *ndlp, void *param);
struct fc_rport;
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
@ -43,20 +45,24 @@ void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_nlp_list(struct lpfc_hba *, struct lpfc_nodelist *, int);
void lpfc_dequeue_node(struct lpfc_hba *, struct lpfc_nodelist *);
void lpfc_nlp_set_state(struct lpfc_hba *, struct lpfc_nodelist *, int);
void lpfc_drop_node(struct lpfc_hba *, struct lpfc_nodelist *);
void lpfc_set_disctmo(struct lpfc_hba *);
int lpfc_can_disctmo(struct lpfc_hba *);
int lpfc_unreg_rpi(struct lpfc_hba *, struct lpfc_nodelist *);
int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *, struct lpfc_nodelist *);
int lpfc_nlp_remove(struct lpfc_hba *, struct lpfc_nodelist *);
void lpfc_nlp_init(struct lpfc_hba *, struct lpfc_nodelist *, uint32_t);
struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *);
int lpfc_nlp_put(struct lpfc_nodelist *);
struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_hba *, uint32_t);
void lpfc_disc_list_loopmap(struct lpfc_hba *);
void lpfc_disc_start(struct lpfc_hba *);
void lpfc_disc_flush_list(struct lpfc_hba *);
void lpfc_disc_timeout(unsigned long);
struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi);
struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi);
int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
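The lpfc_nlp_get()/lpfc_nlp_put() prototypes added above (together with the struct kref that struct lpfc_nodelist gains later in this commit) replace the old nlp_disc_refcnt/list-state bookkeeping with plain reference counting. A minimal sketch of the kref idiom involved, using a hypothetical struct my_node rather than the real lpfc types:

#include <linux/kref.h>
#include <linux/slab.h>

struct my_node {
	struct kref kref;
	/* ... payload ... */
};

static void my_node_release(struct kref *kref)
{
	kfree(container_of(kref, struct my_node, kref));
}

static struct my_node *my_node_alloc(void)
{
	struct my_node *n = kzalloc(sizeof(*n), GFP_KERNEL);

	if (n)
		kref_init(&n->kref);	/* refcount starts at 1 */
	return n;
}

static struct my_node *my_node_get(struct my_node *n)
{
	if (n)
		kref_get(&n->kref);
	return n;
}

static int my_node_put(struct my_node *n)
{
	/* kref_put() returns 1 when this put freed the object */
	return n ? kref_put(&n->kref, my_node_release) : 0;
}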
@ -66,8 +72,7 @@ int lpfc_disc_state_machine(struct lpfc_hba *, struct lpfc_nodelist *, void *,
int lpfc_check_sparm(struct lpfc_hba *, struct lpfc_nodelist *,
struct serv_parm *, uint32_t);
int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist * ndlp,
int);
int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist * ndlp);
int lpfc_els_abort_flogi(struct lpfc_hba *);
int lpfc_initial_flogi(struct lpfc_hba *);
int lpfc_issue_els_plogi(struct lpfc_hba *, uint32_t, uint8_t);
@ -113,7 +118,10 @@ void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int, int);
void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
int lpfc_online(struct lpfc_hba *);
int lpfc_offline(struct lpfc_hba *);
void lpfc_block_mgmt_io(struct lpfc_hba *);
void lpfc_unblock_mgmt_io(struct lpfc_hba *);
void lpfc_offline_prep(struct lpfc_hba *);
void lpfc_offline(struct lpfc_hba *);
int lpfc_sli_setup(struct lpfc_hba *);
int lpfc_sli_queue_setup(struct lpfc_hba *);
@ -162,8 +170,8 @@ int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
struct lpfc_sli_ring *,
dma_addr_t);
int lpfc_sli_issue_abort_iotag32(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
int lpfc_sli_sum_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
uint64_t, lpfc_ctx_cmd);
int lpfc_sli_abort_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
@ -172,9 +180,8 @@ int lpfc_sli_abort_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
void lpfc_mbox_timeout(unsigned long);
void lpfc_mbox_timeout_handler(struct lpfc_hba *);
struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_hba *, uint32_t, uint32_t);
struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_hba *, uint32_t,
struct lpfc_name *);
struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_hba *, uint32_t);
struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_hba *, struct lpfc_name *);
int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
uint32_t timeout);
@ -193,6 +200,9 @@ void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
/* Function prototypes. */
const char* lpfc_info(struct Scsi_Host *);
void lpfc_scan_start(struct Scsi_Host *);
int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
void lpfc_get_cfgparam(struct lpfc_hba *);
int lpfc_alloc_sysfs_attr(struct lpfc_hba *);
void lpfc_free_sysfs_attr(struct lpfc_hba *);


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* Copyright (C) 2004-2007 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -334,21 +334,22 @@ lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size)
lpfc_set_disctmo(phba);
Cnt = Size > FCELSSIZE ? FCELSSIZE : Size;
list_add_tail(&head, &mp->list);
list_for_each_entry_safe(mp, next_mp, &head, list) {
mlast = mp;
Cnt = Size > FCELSSIZE ? FCELSSIZE : Size;
Size -= Cnt;
if (!ctptr)
if (!ctptr) {
ctptr = (uint32_t *) mlast->virt;
else
} else
Cnt -= 16; /* subtract length of CT header */
/* Loop through entire NameServer list of DIDs */
while (Cnt) {
while (Cnt >= sizeof (uint32_t)) {
/* Get next DID from NameServer List */
CTentry = *ctptr++;
@ -442,10 +443,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
phba->fc_ns_retry++;
/* CT command is being retried */
ndlp =
lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
NameServer_DID);
if (ndlp) {
ndlp = lpfc_findnode_did(phba, NameServer_DID);
if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) ==
0) {
goto out;
@ -729,7 +728,7 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba * phba,
uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, FDMI_DID);
ndlp = lpfc_findnode_did(phba, FDMI_DID);
if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
/* FDMI rsp failed */
lpfc_printf_log(phba,
@ -1039,6 +1038,9 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
case LA_4GHZ_LINK:
ae->un.PortSpeed = HBA_PORTSPEED_4GBIT;
break;
case LA_8GHZ_LINK:
ae->un.PortSpeed = HBA_PORTSPEED_8GBIT;
break;
default:
ae->un.PortSpeed =
HBA_PORTSPEED_UNKNOWN;
@ -1161,7 +1163,7 @@ lpfc_fdmi_tmo_handler(struct lpfc_hba *phba)
{
struct lpfc_nodelist *ndlp;
ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, FDMI_DID);
ndlp = lpfc_findnode_did(phba, FDMI_DID);
if (ndlp) {
if (init_utsname()->nodename[0] != '\0') {
lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* Copyright (C) 2004-2007 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -31,6 +31,7 @@
/* worker thread events */
enum lpfc_work_type {
LPFC_EVT_ONLINE,
LPFC_EVT_OFFLINE_PREP,
LPFC_EVT_OFFLINE,
LPFC_EVT_WARM_START,
LPFC_EVT_KILL,
@ -68,7 +69,6 @@ struct lpfc_nodelist {
uint16_t nlp_maxframe; /* Max RCV frame size */
uint8_t nlp_class_sup; /* Supported Classes */
uint8_t nlp_retry; /* used for ELS retries */
uint8_t nlp_disc_refcnt; /* used for DSM */
uint8_t nlp_fcp_info; /* class info, bits 0-3 */
#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */
@ -79,20 +79,10 @@ struct lpfc_nodelist {
struct lpfc_work_evt els_retry_evt;
unsigned long last_ramp_up_time; /* jiffy of last ramp up */
unsigned long last_q_full_time; /* jiffy of last queue full */
struct kref kref;
};
/* Defines for nlp_flag (uint32) */
#define NLP_NO_LIST 0x0 /* Indicates immediately free node */
#define NLP_UNUSED_LIST 0x1 /* Flg to indicate node will be freed */
#define NLP_PLOGI_LIST 0x2 /* Flg to indicate sent PLOGI */
#define NLP_ADISC_LIST 0x3 /* Flg to indicate sent ADISC */
#define NLP_REGLOGIN_LIST 0x4 /* Flg to indicate sent REG_LOGIN */
#define NLP_PRLI_LIST 0x5 /* Flg to indicate sent PRLI */
#define NLP_UNMAPPED_LIST 0x6 /* Node is now unmapped */
#define NLP_MAPPED_LIST 0x7 /* Node is now mapped */
#define NLP_NPR_LIST 0x8 /* Node is in NPort Recovery state */
#define NLP_JUST_DQ 0x9 /* just deque ndlp in lpfc_nlp_list */
#define NLP_LIST_MASK 0xf /* mask to see what list node is on */
#define NLP_PLOGI_SND 0x20 /* sent PLOGI request for this entry */
#define NLP_PRLI_SND 0x40 /* sent PRLI request for this entry */
#define NLP_ADISC_SND 0x80 /* sent ADISC request for this entry */
@ -108,20 +98,8 @@ struct lpfc_nodelist {
ACC */
#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from
NPR list */
#define NLP_DELAY_REMOVE 0x4000000 /* Defer removal till end of DSM */
#define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */
/* Defines for list searches */
#define NLP_SEARCH_MAPPED 0x1 /* search mapped */
#define NLP_SEARCH_UNMAPPED 0x2 /* search unmapped */
#define NLP_SEARCH_PLOGI 0x4 /* search plogi */
#define NLP_SEARCH_ADISC 0x8 /* search adisc */
#define NLP_SEARCH_REGLOGIN 0x10 /* search reglogin */
#define NLP_SEARCH_PRLI 0x20 /* search prli */
#define NLP_SEARCH_NPR 0x40 /* search npr */
#define NLP_SEARCH_UNUSED 0x80 /* search unused */
#define NLP_SEARCH_ALL 0xff /* search all lists */
/* There are 4 different double linked lists nodelist entries can reside on.
* The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
* when Link Up discovery or Registered State Change Notification (RSCN)

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* Copyright (C) 2004-2007 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -1078,6 +1078,8 @@ typedef struct {
/* Start FireFly Register definitions */
#define PCI_VENDOR_ID_EMULEX 0x10df
#define PCI_DEVICE_ID_FIREFLY 0x1ae5
#define PCI_DEVICE_ID_SAT_SMB 0xf011
#define PCI_DEVICE_ID_SAT_MID 0xf015
#define PCI_DEVICE_ID_RFLY 0xf095
#define PCI_DEVICE_ID_PFLY 0xf098
#define PCI_DEVICE_ID_LP101 0xf0a1
@ -1089,6 +1091,9 @@ typedef struct {
#define PCI_DEVICE_ID_NEPTUNE 0xf0f5
#define PCI_DEVICE_ID_NEPTUNE_SCSP 0xf0f6
#define PCI_DEVICE_ID_NEPTUNE_DCSP 0xf0f7
#define PCI_DEVICE_ID_SAT 0xf100
#define PCI_DEVICE_ID_SAT_SCSP 0xf111
#define PCI_DEVICE_ID_SAT_DCSP 0xf112
#define PCI_DEVICE_ID_SUPERFLY 0xf700
#define PCI_DEVICE_ID_DRAGONFLY 0xf800
#define PCI_DEVICE_ID_CENTAUR 0xf900
@ -1098,6 +1103,7 @@ typedef struct {
#define PCI_DEVICE_ID_LP10000S 0xfc00
#define PCI_DEVICE_ID_LP11000S 0xfc10
#define PCI_DEVICE_ID_LPE11000S 0xfc20
#define PCI_DEVICE_ID_SAT_S 0xfc40
#define PCI_DEVICE_ID_HELIOS 0xfd00
#define PCI_DEVICE_ID_HELIOS_SCSP 0xfd11
#define PCI_DEVICE_ID_HELIOS_DCSP 0xfd12
@ -1118,6 +1124,7 @@ typedef struct {
#define HELIOS_JEDEC_ID 0x0364
#define ZEPHYR_JEDEC_ID 0x0577
#define VIPER_JEDEC_ID 0x4838
#define SATURN_JEDEC_ID 0x1004
#define JEDEC_ID_MASK 0x0FFFF000
#define JEDEC_ID_SHIFT 12
@ -1565,7 +1572,7 @@ typedef struct {
#define LINK_SPEED_1G 1 /* 1 Gigabaud */
#define LINK_SPEED_2G 2 /* 2 Gigabaud */
#define LINK_SPEED_4G 4 /* 4 Gigabaud */
#define LINK_SPEED_8G 8 /* 4 Gigabaud */
#define LINK_SPEED_8G 8 /* 8 Gigabaud */
#define LINK_SPEED_10G 16 /* 10 Gigabaud */
} INIT_LINK_VAR;


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* Copyright (C) 2004-2007 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -386,12 +386,12 @@ lpfc_config_port_post(struct lpfc_hba * phba)
* Setup the ring 0 (els) timeout handler
*/
timeout = phba->fc_ratov << 1;
phba->els_tmofunc.expires = jiffies + HZ * timeout;
add_timer(&phba->els_tmofunc);
mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout);
lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
lpfc_set_loopback_flag(phba);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba,
KERN_ERR,
@ -418,33 +418,6 @@ lpfc_config_port_post(struct lpfc_hba * phba)
return (0);
}
static int
lpfc_discovery_wait(struct lpfc_hba *phba)
{
int i = 0;
while ((phba->hba_state != LPFC_HBA_READY) ||
(phba->num_disc_nodes) || (phba->fc_prli_sent) ||
((phba->fc_map_cnt == 0) && (i<2)) ||
(phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE)) {
/* Check every second for 30 retries. */
i++;
if (i > 30) {
return -ETIMEDOUT;
}
if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) {
/* The link is down. Set linkdown timeout */
return -ETIMEDOUT;
}
/* Delay for 1 second to give discovery time to complete. */
msleep(1000);
}
return 0;
}
/************************************************************************/
/* */
/* lpfc_hba_down_prep */
@ -550,12 +523,15 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
* There was a firmware error. Take the hba offline and then
* attempt to restart it.
*/
lpfc_offline_prep(phba);
lpfc_offline(phba);
lpfc_sli_brdrestart(phba);
if (lpfc_online(phba) == 0) { /* Initialize the HBA */
mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
lpfc_unblock_mgmt_io(phba);
return;
}
lpfc_unblock_mgmt_io(phba);
} else {
/* The if clause above forces this code path when the status
* failure is a value other than FFER6. Do not call the offline
@ -573,7 +549,9 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
lpfc_offline_prep(phba);
lpfc_offline(phba);
lpfc_unblock_mgmt_io(phba);
phba->hba_state = LPFC_HBA_ERROR;
lpfc_hba_down_post(phba);
}
@ -633,7 +611,7 @@ lpfc_handle_latt_free_mbuf:
lpfc_handle_latt_free_mp:
kfree(mp);
lpfc_handle_latt_free_pmb:
kfree(pmb);
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
/* Enable Link attention interrupts */
spin_lock_irq(phba->host->host_lock);
@ -925,6 +903,24 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
m = (typeof(m)){"LPe11000-S", max_speed,
"PCIe"};
break;
case PCI_DEVICE_ID_SAT:
m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
break;
case PCI_DEVICE_ID_SAT_MID:
m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
break;
case PCI_DEVICE_ID_SAT_SMB:
m = (typeof(m)){"LPe121", max_speed, "PCIe"};
break;
case PCI_DEVICE_ID_SAT_DCSP:
m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
break;
case PCI_DEVICE_ID_SAT_SCSP:
m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
break;
case PCI_DEVICE_ID_SAT_S:
m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
break;
default:
m = (typeof(m)){ NULL };
break;
@ -1174,69 +1170,17 @@ lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
}
static void
lpfc_cleanup(struct lpfc_hba * phba, uint32_t save_bind)
lpfc_cleanup(struct lpfc_hba * phba)
{
struct lpfc_nodelist *ndlp, *next_ndlp;
/* clean up phba - lpfc specific */
lpfc_can_disctmo(phba);
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list,
nlp_listp) {
lpfc_nlp_remove(phba, ndlp);
}
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp)
lpfc_nlp_put(ndlp);
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
nlp_listp) {
lpfc_nlp_remove(phba, ndlp);
}
INIT_LIST_HEAD(&phba->fc_nodes);
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
nlp_listp) {
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
}
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
nlp_listp) {
lpfc_nlp_remove(phba, ndlp);
}
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
nlp_listp) {
lpfc_nlp_remove(phba, ndlp);
}
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_reglogin_list,
nlp_listp) {
lpfc_nlp_remove(phba, ndlp);
}
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
nlp_listp) {
lpfc_nlp_remove(phba, ndlp);
}
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
nlp_listp) {
lpfc_nlp_remove(phba, ndlp);
}
INIT_LIST_HEAD(&phba->fc_nlpmap_list);
INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
INIT_LIST_HEAD(&phba->fc_unused_list);
INIT_LIST_HEAD(&phba->fc_plogi_list);
INIT_LIST_HEAD(&phba->fc_adisc_list);
INIT_LIST_HEAD(&phba->fc_reglogin_list);
INIT_LIST_HEAD(&phba->fc_prli_list);
INIT_LIST_HEAD(&phba->fc_npr_list);
phba->fc_map_cnt = 0;
phba->fc_unmap_cnt = 0;
phba->fc_plogi_cnt = 0;
phba->fc_adisc_cnt = 0;
phba->fc_reglogin_cnt = 0;
phba->fc_prli_cnt = 0;
phba->fc_npr_cnt = 0;
phba->fc_unused_cnt= 0;
return;
}
@ -1262,21 +1206,6 @@ lpfc_stop_timer(struct lpfc_hba * phba)
{
struct lpfc_sli *psli = &phba->sli;
/* Instead of a timer, this has been converted to a
* deferred processing list.
*/
while (!list_empty(&phba->freebufList)) {
struct lpfc_dmabuf *mp = NULL;
list_remove_head((&phba->freebufList), mp,
struct lpfc_dmabuf, list);
if (mp) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
}
del_timer_sync(&phba->fcp_poll_timer);
del_timer_sync(&phba->fc_estabtmo);
del_timer_sync(&phba->fc_disctmo);
@ -1302,60 +1231,76 @@ lpfc_online(struct lpfc_hba * phba)
"%d:0458 Bring Adapter online\n",
phba->brd_no);
if (!lpfc_sli_queue_setup(phba))
return 1;
lpfc_block_mgmt_io(phba);
if (lpfc_sli_hba_setup(phba)) /* Initialize the HBA */
if (!lpfc_sli_queue_setup(phba)) {
lpfc_unblock_mgmt_io(phba);
return 1;
}
if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */
lpfc_unblock_mgmt_io(phba);
return 1;
}
spin_lock_irq(phba->host->host_lock);
phba->fc_flag &= ~FC_OFFLINE_MODE;
spin_unlock_irq(phba->host->host_lock);
lpfc_unblock_mgmt_io(phba);
return 0;
}
int
lpfc_offline(struct lpfc_hba * phba)
void
lpfc_block_mgmt_io(struct lpfc_hba * phba)
{
struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
unsigned long iflag;
int i;
int cnt = 0;
if (!phba)
return 0;
spin_lock_irqsave(phba->host->host_lock, iflag);
phba->fc_flag |= FC_BLOCK_MGMT_IO;
spin_unlock_irqrestore(phba->host->host_lock, iflag);
}
void
lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
{
unsigned long iflag;
spin_lock_irqsave(phba->host->host_lock, iflag);
phba->fc_flag &= ~FC_BLOCK_MGMT_IO;
spin_unlock_irqrestore(phba->host->host_lock, iflag);
}
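lpfc_block_mgmt_io() and lpfc_unblock_mgmt_io() just toggle FC_BLOCK_MGMT_IO under host_lock; the management entry points seen earlier (sysfs mailbox, lpfc_issue_lip, lpfc_get_stats, lpfc_reset_stats) test the flag and back off while an offline/online transition is in flight. The gate idiom, sketched with assumed names:

#include <linux/errno.h>
#include <linux/spinlock.h>

#define MY_BLOCKED	0x1	/* illustrative stand-in for FC_BLOCK_MGMT_IO */

struct my_hba {
	spinlock_t lock;
	unsigned long flags;
};

static void my_block_mgmt(struct my_hba *h)
{
	unsigned long iflag;

	spin_lock_irqsave(&h->lock, iflag);
	h->flags |= MY_BLOCKED;
	spin_unlock_irqrestore(&h->lock, iflag);
}

/* Management paths bail out while the transition is in progress. */
static int my_mgmt_op(struct my_hba *h)
{
	if (h->flags & MY_BLOCKED)
		return -EAGAIN;
	/* ... issue the mailbox command ... */
	return 0;
}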
void
lpfc_offline_prep(struct lpfc_hba * phba)
{
struct lpfc_nodelist *ndlp, *next_ndlp;
if (phba->fc_flag & FC_OFFLINE_MODE)
return 0;
return;
psli = &phba->sli;
lpfc_block_mgmt_io(phba);
lpfc_linkdown(phba);
/* Issue an unreg_login to all nodes */
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp)
if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
lpfc_unreg_rpi(phba, ndlp);
lpfc_sli_flush_mbox_queue(phba);
}
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
/* The linkdown event takes 30 seconds to timeout. */
while (pring->txcmplq_cnt) {
mdelay(10);
if (cnt++ > 3000) {
lpfc_printf_log(phba,
KERN_WARNING, LOG_INIT,
"%d:0466 Outstanding IO when "
"bringing Adapter offline\n",
phba->brd_no);
break;
}
}
}
void
lpfc_offline(struct lpfc_hba * phba)
{
unsigned long iflag;
if (phba->fc_flag & FC_OFFLINE_MODE)
return;
/* stop all timers associated with this hba */
lpfc_stop_timer(phba);
phba->work_hba_events = 0;
phba->work_ha = 0;
lpfc_printf_log(phba,
KERN_WARNING,
@ -1366,11 +1311,12 @@ lpfc_offline(struct lpfc_hba * phba)
/* Bring down the SLI Layer and cleanup. The HBA is offline
now. */
lpfc_sli_hba_down(phba);
lpfc_cleanup(phba, 1);
lpfc_cleanup(phba);
spin_lock_irqsave(phba->host->host_lock, iflag);
phba->work_hba_events = 0;
phba->work_ha = 0;
phba->fc_flag |= FC_OFFLINE_MODE;
spin_unlock_irqrestore(phba->host->host_lock, iflag);
return 0;
}
/******************************************************************************
@ -1407,6 +1353,156 @@ lpfc_scsi_free(struct lpfc_hba * phba)
return 0;
}
void lpfc_remove_device(struct lpfc_hba *phba)
{
unsigned long iflag;
lpfc_free_sysfs_attr(phba);
spin_lock_irqsave(phba->host->host_lock, iflag);
phba->fc_flag |= FC_UNLOADING;
spin_unlock_irqrestore(phba->host->host_lock, iflag);
fc_remove_host(phba->host);
scsi_remove_host(phba->host);
kthread_stop(phba->worker_thread);
/*
* Bring down the SLI Layer. This step disables all interrupts,
* clears the rings, discards all mailbox commands, and resets
* the HBA.
*/
lpfc_sli_hba_down(phba);
lpfc_sli_brdrestart(phba);
/* Release the irq reservation */
free_irq(phba->pcidev->irq, phba);
pci_disable_msi(phba->pcidev);
lpfc_cleanup(phba);
lpfc_stop_timer(phba);
phba->work_hba_events = 0;
/*
* Call scsi_free before mem_free since scsi bufs are released to their
* corresponding pools here.
*/
lpfc_scsi_free(phba);
lpfc_mem_free(phba);
/* Free resources associated with SLI2 interface */
dma_free_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE,
phba->slim2p, phba->slim2p_mapping);
/* unmap adapter SLIM and Control Registers */
iounmap(phba->ctrl_regs_memmap_p);
iounmap(phba->slim_memmap_p);
pci_release_regions(phba->pcidev);
pci_disable_device(phba->pcidev);
idr_remove(&lpfc_hba_index, phba->brd_no);
scsi_host_put(phba->host);
}
void lpfc_scan_start(struct Scsi_Host *host)
{
struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
if (lpfc_alloc_sysfs_attr(phba))
goto error;
phba->MBslimaddr = phba->slim_memmap_p;
phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
if (lpfc_sli_hba_setup(phba))
goto error;
/*
* hba setup may have changed the hba_queue_depth so we need to adjust
* the value of can_queue.
*/
host->can_queue = phba->cfg_hba_queue_depth - 10;
return;
error:
lpfc_remove_device(phba);
}
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
if (!phba->host)
return 1;
if (time >= 30 * HZ)
goto finished;
if (phba->hba_state != LPFC_HBA_READY)
return 0;
if (phba->num_disc_nodes || phba->fc_prli_sent)
return 0;
if ((phba->fc_map_cnt == 0) && (time < 2 * HZ))
return 0;
if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE)
return 0;
if ((phba->hba_state > LPFC_LINK_DOWN) || (time < 15 * HZ))
return 0;
finished:
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
spin_lock_irq(shost->host_lock);
lpfc_poll_start_timer(phba);
spin_unlock_irq(shost->host_lock);
}
/*
* set fixed host attributes
* Must be done after lpfc_sli_hba_setup()
*/
fc_host_node_name(shost) = wwn_to_u64(phba->fc_nodename.u.wwn);
fc_host_port_name(shost) = wwn_to_u64(phba->fc_portname.u.wwn);
fc_host_supported_classes(shost) = FC_COS_CLASS3;
memset(fc_host_supported_fc4s(shost), 0,
sizeof(fc_host_supported_fc4s(shost)));
fc_host_supported_fc4s(shost)[2] = 1;
fc_host_supported_fc4s(shost)[7] = 1;
lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(shost));
fc_host_supported_speeds(shost) = 0;
if (phba->lmt & LMT_10Gb)
fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
if (phba->lmt & LMT_4Gb)
fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
if (phba->lmt & LMT_2Gb)
fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
if (phba->lmt & LMT_1Gb)
fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
fc_host_maxframe_size(shost) =
((((uint32_t) phba->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
(uint32_t) phba->fc_sparam.cmn.bbRcvSizeLsb);
/* This value is also unchanging */
memset(fc_host_active_fc4s(shost), 0,
sizeof(fc_host_active_fc4s(shost)));
fc_host_active_fc4s(shost)[2] = 1;
fc_host_active_fc4s(shost)[7] = 1;
spin_lock_irq(shost->host_lock);
phba->fc_flag &= ~FC_LOADING;
spin_unlock_irq(shost->host_lock);
return 1;
}
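lpfc_scan_start()/lpfc_scan_finished() move HBA initialization and the deleted lpfc_discovery_wait() polling loop into the midlayer's asynchronous scan hooks: scsi_scan_host() calls ->scan_start() once, then polls ->scan_finished(shost, elapsed) until it returns nonzero, so probe() no longer blocks for up to 30 seconds. A sketch of how such a template is wired up, with my_* names and a stubbed discovery check standing in for the real driver logic:

#include <linux/module.h>
#include <scsi/scsi_host.h>

static void my_scan_start(struct Scsi_Host *shost)
{
	/* kick off link bring-up and discovery; must not block */
}

static int my_discovery_done(struct Scsi_Host *shost)
{
	return 1;	/* stub: pretend discovery completed */
}

static int my_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	if (time >= 30 * HZ)	/* give up, as lpfc_scan_finished() does */
		return 1;
	return my_discovery_done(shost);
}

static struct scsi_host_template my_template = {
	.module		= THIS_MODULE,
	.name		= "my_fc_hba",
	.scan_start	= my_scan_start,
	.scan_finished	= my_scan_finished,
	/* .queuecommand etc. omitted */
};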
static int __devinit
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
@ -1445,9 +1541,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
goto out_put_host;
host->unique_id = phba->brd_no;
INIT_LIST_HEAD(&phba->ctrspbuflist);
INIT_LIST_HEAD(&phba->rnidrspbuflist);
INIT_LIST_HEAD(&phba->freebufList);
/* Initialize timers used by driver */
init_timer(&phba->fc_estabtmo);
@ -1482,16 +1575,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
host->max_lun = phba->cfg_max_luns;
host->this_id = -1;
/* Initialize all internally managed lists. */
INIT_LIST_HEAD(&phba->fc_nlpmap_list);
INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
INIT_LIST_HEAD(&phba->fc_unused_list);
INIT_LIST_HEAD(&phba->fc_plogi_list);
INIT_LIST_HEAD(&phba->fc_adisc_list);
INIT_LIST_HEAD(&phba->fc_reglogin_list);
INIT_LIST_HEAD(&phba->fc_prli_list);
INIT_LIST_HEAD(&phba->fc_npr_list);
INIT_LIST_HEAD(&phba->fc_nodes);
pci_set_master(pdev);
retval = pci_set_mwi(pdev);
@ -1609,13 +1693,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
host->transportt = lpfc_transport_template;
pci_set_drvdata(pdev, host);
error = scsi_add_host(host, &pdev->dev);
if (error)
goto out_kthread_stop;
error = lpfc_alloc_sysfs_attr(phba);
if (error)
goto out_remove_host;
if (phba->cfg_use_msi) {
error = pci_enable_msi(phba->pcidev);
@ -1631,73 +1708,15 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0451 Enable interrupt handler failed\n",
phba->brd_no);
goto out_free_sysfs_attr;
goto out_kthread_stop;
}
phba->MBslimaddr = phba->slim_memmap_p;
phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
error = lpfc_sli_hba_setup(phba);
if (error) {
error = -ENODEV;
error = scsi_add_host(host, &pdev->dev);
if (error)
goto out_free_irq;
}
/*
* hba setup may have changed the hba_queue_depth so we need to adjust
* the value of can_queue.
*/
host->can_queue = phba->cfg_hba_queue_depth - 10;
scsi_scan_host(host);
lpfc_discovery_wait(phba);
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
spin_lock_irq(phba->host->host_lock);
lpfc_poll_start_timer(phba);
spin_unlock_irq(phba->host->host_lock);
}
/*
* set fixed host attributes
* Must be done after lpfc_sli_hba_setup()
*/
fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.u.wwn);
fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.u.wwn);
fc_host_supported_classes(host) = FC_COS_CLASS3;
memset(fc_host_supported_fc4s(host), 0,
sizeof(fc_host_supported_fc4s(host)));
fc_host_supported_fc4s(host)[2] = 1;
fc_host_supported_fc4s(host)[7] = 1;
lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(host));
fc_host_supported_speeds(host) = 0;
if (phba->lmt & LMT_10Gb)
fc_host_supported_speeds(host) |= FC_PORTSPEED_10GBIT;
if (phba->lmt & LMT_4Gb)
fc_host_supported_speeds(host) |= FC_PORTSPEED_4GBIT;
if (phba->lmt & LMT_2Gb)
fc_host_supported_speeds(host) |= FC_PORTSPEED_2GBIT;
if (phba->lmt & LMT_1Gb)
fc_host_supported_speeds(host) |= FC_PORTSPEED_1GBIT;
fc_host_maxframe_size(host) =
((((uint32_t) phba->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
(uint32_t) phba->fc_sparam.cmn.bbRcvSizeLsb);
/* This value is also unchanging */
memset(fc_host_active_fc4s(host), 0,
sizeof(fc_host_active_fc4s(host)));
fc_host_active_fc4s(host)[2] = 1;
fc_host_active_fc4s(host)[7] = 1;
spin_lock_irq(phba->host->host_lock);
phba->fc_flag &= ~FC_LOADING;
spin_unlock_irq(phba->host->host_lock);
return 0;
out_free_irq:
@ -1705,11 +1724,6 @@ out_free_irq:
phba->work_hba_events = 0;
free_irq(phba->pcidev->irq, phba);
pci_disable_msi(phba->pcidev);
out_free_sysfs_attr:
lpfc_free_sysfs_attr(phba);
out_remove_host:
fc_remove_host(phba->host);
scsi_remove_host(phba->host);
out_kthread_stop:
kthread_stop(phba->worker_thread);
out_free_iocbq:
@ -1747,56 +1761,8 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata;
unsigned long iflag;
lpfc_free_sysfs_attr(phba);
spin_lock_irqsave(phba->host->host_lock, iflag);
phba->fc_flag |= FC_UNLOADING;
spin_unlock_irqrestore(phba->host->host_lock, iflag);
fc_remove_host(phba->host);
scsi_remove_host(phba->host);
kthread_stop(phba->worker_thread);
/*
* Bring down the SLI Layer. This step disables all interrupts,
* clears the rings, discards all mailbox commands, and resets
* the HBA.
*/
lpfc_sli_hba_down(phba);
lpfc_sli_brdrestart(phba);
/* Release the irq reservation */
free_irq(phba->pcidev->irq, phba);
pci_disable_msi(phba->pcidev);
lpfc_cleanup(phba, 0);
lpfc_stop_timer(phba);
phba->work_hba_events = 0;
/*
* Call scsi_free before mem_free since scsi bufs are released to their
* corresponding pools here.
*/
lpfc_scsi_free(phba);
lpfc_mem_free(phba);
/* Free resources associated with SLI2 interface */
dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
phba->slim2p, phba->slim2p_mapping);
/* unmap adapter SLIM and Control Registers */
iounmap(phba->ctrl_regs_memmap_p);
iounmap(phba->slim_memmap_p);
pci_release_regions(phba->pcidev);
pci_disable_device(phba->pcidev);
idr_remove(&lpfc_hba_index, phba->brd_no);
scsi_host_put(phba->host);
lpfc_remove_device(phba);
pci_set_drvdata(pdev, NULL);
}
@ -1941,6 +1907,18 @@ static struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0 }
};


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* Copyright (C) 2004-2007 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -212,6 +212,7 @@ lpfc_init_link(struct lpfc_hba * phba,
case LINK_SPEED_1G:
case LINK_SPEED_2G:
case LINK_SPEED_4G:
case LINK_SPEED_8G:
mb->un.varInitLnk.link_flags |=
FLAGS_LINK_SPEED;
mb->un.varInitLnk.link_speed = linkspeed;


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* Copyright (C) 2004-2007 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -168,14 +168,13 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba * phba,
* routine effectively results in a "software abort".
*/
int
lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
int send_abts)
lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
LIST_HEAD(completions);
struct lpfc_sli *psli;
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *iocb, *next_iocb;
IOCB_t *icmd;
int found = 0;
IOCB_t *cmd;
/* Abort outstanding I/O on NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
@ -188,75 +187,39 @@ lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
pring = &psli->ring[LPFC_ELS_RING];
/* First check the txq */
do {
found = 0;
spin_lock_irq(phba->host->host_lock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
/* Check to see if iocb matches the nport we are looking
for */
if ((lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))) {
found = 1;
/* It matches, so dequeue and call compl with an
error */
list_del(&iocb->list);
pring->txq_cnt--;
if (iocb->iocb_cmpl) {
icmd = &iocb->iocb;
icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
spin_unlock_irq(phba->host->host_lock);
(iocb->iocb_cmpl) (phba, iocb, iocb);
spin_lock_irq(phba->host->host_lock);
} else
lpfc_sli_release_iocbq(phba, iocb);
break;
}
spin_lock_irq(phba->host->host_lock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
/* Check to see if iocb matches the nport we are looking
for */
if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
/* It matches, so dequeue and call compl with an
error */
list_move_tail(&iocb->list, &completions);
pring->txq_cnt--;
}
spin_unlock_irq(phba->host->host_lock);
} while (found);
}
/* Everything on txcmplq will be returned by firmware
* with a no rpi / linkdown / abort error. For ring 0,
* ELS discovery, we want to get rid of it right here.
*/
/* Next check the txcmplq */
do {
found = 0;
spin_lock_irq(phba->host->host_lock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
list) {
/* Check to see if iocb matches the nport we are looking
for */
if ((lpfc_check_sli_ndlp (phba, pring, iocb, ndlp))) {
found = 1;
/* It matches, so deque and call compl with an
error */
list_del(&iocb->list);
pring->txcmplq_cnt--;
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
/* Check to see if iocb matches the nport we are looking
for */
if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
lpfc_sli_issue_abort_iotag(phba, pring, iocb);
}
spin_unlock_irq(phba->host->host_lock);
icmd = &iocb->iocb;
/* If the driver is completing an ELS
* command early, flush it out of the firmware.
*/
if (send_abts &&
(icmd->ulpCommand == CMD_ELS_REQUEST64_CR) &&
(icmd->un.elsreq64.bdl.ulpIoTag32)) {
lpfc_sli_issue_abort_iotag32(phba,
pring, iocb);
}
if (iocb->iocb_cmpl) {
icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
spin_unlock_irq(phba->host->host_lock);
(iocb->iocb_cmpl) (phba, iocb, iocb);
spin_lock_irq(phba->host->host_lock);
} else
lpfc_sli_release_iocbq(phba, iocb);
break;
}
}
spin_unlock_irq(phba->host->host_lock);
} while(found);
while (!list_empty(&completions)) {
iocb = list_get_first(&completions, struct lpfc_iocbq, list);
cmd = &iocb->iocb;
list_del(&iocb->list);
if (iocb->iocb_cmpl) {
cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
(iocb->iocb_cmpl) (phba, iocb, iocb);
} else
lpfc_sli_release_iocbq(phba, iocb);
}
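The rewritten lpfc_els_abort() drops the old found-flag retry loops, which released and re-took host_lock around every single completion, in favor of the standard two-phase pattern: collect matching iocbs onto a private completions list with list_move_tail() while holding the lock, then complete them after unlocking. The pattern in isolation, sketched with a hypothetical request type:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_req {
	struct list_head list;
	void (*done)(struct my_req *req, int error);	/* assumed callback */
};

static void abort_matching(spinlock_t *lock, struct list_head *queue,
			   bool (*match)(struct my_req *req))
{
	LIST_HEAD(completions);
	struct my_req *req, *next;

	/* Phase 1: collect under the lock; run no callbacks yet. */
	spin_lock_irq(lock);
	list_for_each_entry_safe(req, next, queue, list)
		if (match(req))
			list_move_tail(&req->list, &completions);
	spin_unlock_irq(lock);

	/* Phase 2: complete outside the lock, in queue order. */
	while (!list_empty(&completions)) {
		req = list_entry(completions.next, struct my_req, list);
		list_del(&req->list);
		req->done(req, -ECANCELED);
	}
}

Items on the txcmplq are handled differently: the firmware still owns them, so the new code issues lpfc_sli_issue_abort_iotag() and lets the aborted completion come back through the normal path.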
/* If we are delaying issuing an ELS command, cancel it */
if (ndlp->nlp_flag & NLP_DELAY_TMO)
@ -390,7 +353,10 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
* queue this mbox command to be processed later.
*/
mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
mbox->context2 = ndlp;
/*
* mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
* command issued in lpfc_cmpl_els_acc().
*/
ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
/*
@ -404,7 +370,7 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
*/
if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp, 1);
lpfc_els_abort(phba, ndlp);
}
lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
@ -471,8 +437,7 @@ lpfc_rcv_padisc(struct lpfc_hba * phba,
spin_unlock_irq(phba->host->host_lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = ndlp->nlp_state;
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
return 0;
}
@ -502,12 +467,10 @@ lpfc_rcv_logo(struct lpfc_hba * phba,
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = ndlp->nlp_state;
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
} else {
ndlp->nlp_prev_state = ndlp->nlp_state;
ndlp->nlp_state = NLP_STE_UNUSED_NODE;
lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
}
spin_lock_irq(phba->host->host_lock);
@ -601,11 +564,10 @@ lpfc_rcv_plogi_unused_node(struct lpfc_hba * phba,
if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
ndlp->nlp_state = NLP_STE_UNUSED_NODE;
lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
return ndlp->nlp_state;
}
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
lpfc_drop_node(phba, ndlp);
return NLP_STE_FREED_NODE;
}
@ -614,7 +576,7 @@ lpfc_rcv_els_unused_node(struct lpfc_hba * phba,
struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
lpfc_issue_els_logo(phba, ndlp, 0);
lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
return ndlp->nlp_state;
}
@ -630,7 +592,7 @@ lpfc_rcv_logo_unused_node(struct lpfc_hba * phba,
ndlp->nlp_flag |= NLP_LOGO_ACC;
spin_unlock_irq(phba->host->host_lock);
lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
return ndlp->nlp_state;
}
@ -639,7 +601,7 @@ static uint32_t
lpfc_cmpl_logo_unused_node(struct lpfc_hba * phba,
struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
lpfc_drop_node(phba, ndlp);
return NLP_STE_FREED_NODE;
}
@ -647,7 +609,7 @@ static uint32_t
lpfc_device_rm_unused_node(struct lpfc_hba * phba,
struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
lpfc_drop_node(phba, ndlp);
return NLP_STE_FREED_NODE;
}
@ -697,7 +659,7 @@ lpfc_rcv_logo_plogi_issue(struct lpfc_hba * phba,
cmdiocb = (struct lpfc_iocbq *) arg;
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp, 1);
lpfc_els_abort(phba, ndlp);
lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
@ -712,7 +674,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_hba * phba,
cmdiocb = (struct lpfc_iocbq *) arg;
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp, 1);
lpfc_els_abort(phba, ndlp);
if (evt == NLP_EVT_RCV_LOGO) {
lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
@ -727,8 +689,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_hba * phba,
spin_unlock_irq(phba->host->host_lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
return ndlp->nlp_state;
}
@ -803,32 +764,26 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
goto out;
lpfc_unreg_rpi(phba, ndlp);
if (lpfc_reg_login
(phba, irsp->un.elsreq64.remoteID,
(uint8_t *) sp, mbox, 0) == 0) {
if (lpfc_reg_login(phba, irsp->un.elsreq64.remoteID, (uint8_t *) sp,
mbox, 0) == 0) {
switch (ndlp->nlp_DID) {
case NameServer_DID:
mbox->mbox_cmpl =
lpfc_mbx_cmpl_ns_reg_login;
mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
break;
case FDMI_DID:
mbox->mbox_cmpl =
lpfc_mbx_cmpl_fdmi_reg_login;
mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
break;
default:
mbox->mbox_cmpl =
lpfc_mbx_cmpl_reg_login;
mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
}
mbox->context2 = ndlp;
mbox->context2 = lpfc_nlp_get(ndlp);
if (lpfc_sli_issue_mbox(phba, mbox,
(MBX_NOWAIT | MBX_STOP_IOCB))
!= MBX_NOT_FINISHED) {
ndlp->nlp_state =
NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_list(phba, ndlp,
NLP_REGLOGIN_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_REG_LOGIN_ISSUE);
return ndlp->nlp_state;
}
lpfc_nlp_put(ndlp);
mp = (struct lpfc_dmabuf *)mbox->context1;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
@ -841,7 +796,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
out:
/* Free this node since the driver cannot login or has the wrong
sparm */
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
lpfc_drop_node(phba, ndlp);
return NLP_STE_FREED_NODE;
}
@ -855,9 +810,9 @@ lpfc_device_rm_plogi_issue(struct lpfc_hba * phba,
}
else {
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp, 1);
lpfc_els_abort(phba, ndlp);
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
lpfc_drop_node(phba, ndlp);
return NLP_STE_FREED_NODE;
}
}
@ -868,11 +823,10 @@ lpfc_device_recov_plogi_issue(struct lpfc_hba * phba,
uint32_t evt)
{
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp, 1);
lpfc_els_abort(phba, ndlp);
ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(phba->host->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(phba->host->host_lock);
@ -888,7 +842,7 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba,
struct lpfc_iocbq *cmdiocb;
/* software abort outstanding ADISC */
lpfc_els_abort(phba, ndlp, 1);
lpfc_els_abort(phba, ndlp);
cmdiocb = (struct lpfc_iocbq *) arg;
@ -896,8 +850,7 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba,
return ndlp->nlp_state;
}
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
return ndlp->nlp_state;
@ -926,7 +879,7 @@ lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
cmdiocb = (struct lpfc_iocbq *) arg;
/* software abort outstanding ADISC */
lpfc_els_abort(phba, ndlp, 0);
lpfc_els_abort(phba, ndlp);
lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
@ -987,20 +940,17 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_hba * phba,
memset(&ndlp->nlp_portname, 0, sizeof (struct lpfc_name));
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
lpfc_unreg_rpi(phba, ndlp);
return ndlp->nlp_state;
}
if (ndlp->nlp_type & NLP_FCP_TARGET) {
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
ndlp->nlp_state = NLP_STE_MAPPED_NODE;
lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_MAPPED_NODE);
} else {
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
}
return ndlp->nlp_state;
}
@ -1016,9 +966,9 @@ lpfc_device_rm_adisc_issue(struct lpfc_hba * phba,
}
else {
/* software abort outstanding ADISC */
lpfc_els_abort(phba, ndlp, 1);
lpfc_els_abort(phba, ndlp);
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
lpfc_drop_node(phba, ndlp);
return NLP_STE_FREED_NODE;
}
}
@ -1029,11 +979,10 @@ lpfc_device_recov_adisc_issue(struct lpfc_hba * phba,
uint32_t evt)
{
/* software abort outstanding ADISC */
lpfc_els_abort(phba, ndlp, 1);
lpfc_els_abort(phba, ndlp);
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(phba->host->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
ndlp->nlp_flag |= NLP_NPR_ADISC;
@ -1074,9 +1023,36 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
uint32_t evt)
{
struct lpfc_iocbq *cmdiocb;
LPFC_MBOXQ_t *mb;
LPFC_MBOXQ_t *nextmb;
struct lpfc_dmabuf *mp;
cmdiocb = (struct lpfc_iocbq *) arg;
/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
if ((mb = phba->sli.mbox_active)) {
if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
mb->context2 = NULL;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
}
spin_lock_irq(phba->host->host_lock);
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
mp = (struct lpfc_dmabuf *) (mb->context1);
if (mp) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
list_del(&mb->list);
mempool_free(mb, phba->mbox_mem_pool);
}
}
spin_unlock_irq(phba->host->host_lock);
lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
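The hunk above scans both the active mailbox slot and the pending mailbox
queue for REG_LOGIN64 commands that still reference the ndlp, detaching or
freeing them before the LOGO is processed. A minimal sketch of that
scan-and-detach idiom, with invented names (struct cmd and detach_ctx are
illustrative, not driver code):

#include <linux/list.h>
#include <linux/spinlock.h>

struct cmd {
        struct list_head list;
        void *ctx;                      /* object a completion would touch */
};

/* Unlink every queued command that references ctx, so no completion
 * handler can dereference the object after it is torn down. */
static void detach_ctx(struct list_head *queue, spinlock_t *lock, void *ctx)
{
        struct cmd *c, *tmp;

        spin_lock_irq(lock);
        list_for_each_entry_safe(c, tmp, queue, list) {
                if (c->ctx == ctx) {
                        list_del(&c->list);
                        c->ctx = NULL;  /* completions must tolerate NULL */
                }
        }
        spin_unlock_irq(lock);
}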
@ -1133,8 +1109,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
*/
if (mb->mbxStatus == MBXERR_RPI_FULL) {
ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
ndlp->nlp_state = NLP_STE_UNUSED_NODE;
lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
return ndlp->nlp_state;
}
@ -1147,8 +1122,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
lpfc_issue_els_logo(phba, ndlp, 0);
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
return ndlp->nlp_state;
}
@ -1157,13 +1131,11 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
/* Only if we are not a fabric nport do we issue PRLI */
if (!(ndlp->nlp_type & NLP_FABRIC)) {
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_PRLI_ISSUE);
lpfc_issue_els_prli(phba, ndlp, 0);
} else {
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
}
return ndlp->nlp_state;
}
@ -1178,7 +1150,7 @@ lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba,
return ndlp->nlp_state;
}
else {
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
lpfc_drop_node(phba, ndlp);
return NLP_STE_FREED_NODE;
}
}
@ -1189,8 +1161,7 @@ lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba,
uint32_t evt)
{
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(phba->host->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(phba->host->host_lock);
@ -1230,7 +1201,7 @@ lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba,
cmdiocb = (struct lpfc_iocbq *) arg;
/* Software abort outstanding PRLI before sending acc */
lpfc_els_abort(phba, ndlp, 1);
lpfc_els_abort(phba, ndlp);
lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
@ -1279,8 +1250,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
irsp = &rspiocb->iocb;
if (irsp->ulpStatus) {
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
return ndlp->nlp_state;
}
@ -1298,8 +1268,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
}
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
ndlp->nlp_state = NLP_STE_MAPPED_NODE;
lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_MAPPED_NODE);
return ndlp->nlp_state;
}
@ -1330,9 +1299,9 @@ lpfc_device_rm_prli_issue(struct lpfc_hba * phba,
}
else {
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp, 1);
lpfc_els_abort(phba, ndlp);
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
lpfc_drop_node(phba, ndlp);
return NLP_STE_FREED_NODE;
}
}
@ -1359,11 +1328,10 @@ lpfc_device_recov_prli_issue(struct lpfc_hba * phba,
struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
/* software abort outstanding PRLI */
lpfc_els_abort(phba, ndlp, 1);
lpfc_els_abort(phba, ndlp);
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(phba->host->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(phba->host->host_lock);
@ -1436,8 +1404,7 @@ lpfc_device_recov_unmap_node(struct lpfc_hba * phba,
struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
lpfc_disc_set_adisc(phba, ndlp);
@ -1518,8 +1485,7 @@ lpfc_device_recov_mapped_node(struct lpfc_hba * phba,
uint32_t evt)
{
ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(phba->host->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(phba->host->host_lock);
@ -1551,8 +1517,7 @@ lpfc_rcv_plogi_npr_node(struct lpfc_hba * phba,
/* send PLOGI immediately, move to PLOGI issue state */
if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
}
@ -1580,16 +1545,13 @@ lpfc_rcv_prli_npr_node(struct lpfc_hba * phba,
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
spin_unlock_irq(phba->host->host_lock);
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE);
lpfc_issue_els_adisc(phba, ndlp, 0);
} else {
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
}
}
return ndlp->nlp_state;
}
@ -1627,13 +1589,11 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
!(ndlp->nlp_flag & NLP_NPR_2B_DISC)){
if (ndlp->nlp_flag & NLP_NPR_ADISC) {
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE);
lpfc_issue_els_adisc(phba, ndlp, 0);
} else {
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
}
}
@ -1682,7 +1642,7 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_hba * phba,
irsp = &rspiocb->iocb;
if (irsp->ulpStatus) {
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
lpfc_drop_node(phba, ndlp);
return NLP_STE_FREED_NODE;
}
return ndlp->nlp_state;
@ -1700,7 +1660,7 @@ lpfc_cmpl_prli_npr_node(struct lpfc_hba * phba,
irsp = &rspiocb->iocb;
if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
lpfc_drop_node(phba, ndlp);
return NLP_STE_FREED_NODE;
}
return ndlp->nlp_state;
@ -1728,7 +1688,7 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_hba * phba,
irsp = &rspiocb->iocb;
if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
lpfc_drop_node(phba, ndlp);
return NLP_STE_FREED_NODE;
}
return ndlp->nlp_state;
@ -1749,7 +1709,7 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
ndlp->nlp_rpi = mb->un.varWords[0];
else {
if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
lpfc_drop_node(phba, ndlp);
return NLP_STE_FREED_NODE;
}
}
@ -1765,7 +1725,7 @@ lpfc_device_rm_npr_node(struct lpfc_hba * phba,
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
return ndlp->nlp_state;
}
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
lpfc_drop_node(phba, ndlp);
return NLP_STE_FREED_NODE;
}
@ -1964,7 +1924,7 @@ lpfc_disc_state_machine(struct lpfc_hba * phba,
uint32_t(*func) (struct lpfc_hba *, struct lpfc_nodelist *, void *,
uint32_t);
ndlp->nlp_disc_refcnt++;
lpfc_nlp_get(ndlp);
cur_state = ndlp->nlp_state;
/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
@ -1987,18 +1947,7 @@ lpfc_disc_state_machine(struct lpfc_hba * phba,
phba->brd_no,
rc, ndlp->nlp_DID, ndlp->nlp_flag);
ndlp->nlp_disc_refcnt--;
lpfc_nlp_put(ndlp);
/* Check to see if ndlp removal is deferred */
if ((ndlp->nlp_disc_refcnt == 0)
&& (ndlp->nlp_flag & NLP_DELAY_REMOVE)) {
spin_lock_irq(phba->host->host_lock);
ndlp->nlp_flag &= ~NLP_DELAY_REMOVE;
spin_unlock_irq(phba->host->host_lock);
lpfc_nlp_remove(phba, ndlp);
return NLP_STE_FREED_NODE;
}
if (rc == NLP_STE_FREED_NODE)
return NLP_STE_FREED_NODE;
return rc;
}


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* Copyright (C) 2004-2007 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -146,6 +146,10 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
if (lpfc_cmd) {
lpfc_cmd->seg_cnt = 0;
lpfc_cmd->nonsg_phys = 0;
}
spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
return lpfc_cmd;
}
@ -288,13 +292,13 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
}
static void
lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
{
struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
uint32_t resp_info = fcprsp->rspStatus2;
uint32_t scsi_status = fcprsp->rspStatus3;
uint32_t *lp;
@ -355,6 +359,24 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
fcpi_parm, cmnd->cmnd[0], cmnd->underflow);
/*
* If there is an underrun, check whether the underrun reported by the
* storage array is the same as the underrun reported by the HBA.
* If they differ, a frame was dropped.
*/
if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
fcpi_parm &&
(cmnd->resid != fcpi_parm)) {
lpfc_printf_log(phba, KERN_WARNING,
LOG_FCP | LOG_FCP_ERROR,
"%d:0735 FCP Read Check Error and Underrun "
"Data: x%x x%x x%x x%x\n", phba->brd_no,
be32_to_cpu(fcpcmd->fcpDl),
cmnd->resid,
fcpi_parm, cmnd->cmnd[0]);
cmnd->resid = cmnd->request_bufflen;
host_status = DID_ERROR;
}
/*
* The cmnd->underflow is the minimum number of bytes that must
* be transferred for this command. Provided a sense condition
@ -435,7 +457,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
switch (lpfc_cmd->status) {
case IOSTAT_FCP_RSP_ERROR:
/* Call FCP RSP handler to determine result */
lpfc_handle_fcp_err(lpfc_cmd);
lpfc_handle_fcp_err(lpfc_cmd,pIocbOut);
break;
case IOSTAT_NPORT_BSY:
case IOSTAT_FABRIC_BSY:
@ -466,10 +488,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
result = cmd->result;
sdev = cmd->device;
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
cmd->scsi_done(cmd);
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
lpfc_release_scsi_buf(phba, lpfc_cmd);
return;
}
@ -527,7 +549,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
}
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
lpfc_release_scsi_buf(phba, lpfc_cmd);
}
@ -670,6 +691,18 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
return (1);
}
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocbq,
struct lpfc_iocbq *rspiocbq)
{
struct lpfc_scsi_buf *lpfc_cmd =
(struct lpfc_scsi_buf *) cmdiocbq->context1;
if (lpfc_cmd)
lpfc_release_scsi_buf(phba, lpfc_cmd);
return;
}
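lpfc_tskmgmt_def_cmpl exists so a timed-out task-management iocb can
release its scsi buffer later: on IOCB_TIMEDOUT the callers below install
this stub instead of freeing immediately, because the firmware may still
reference the buffer. A hedged sketch of that ownership handoff, using
invented names (struct req, issue_and_wait):

#include <linux/errno.h>
#include <linux/slab.h>

struct req {
        void *buf;                      /* buffer the hardware may still use */
        void (*done)(struct req *);     /* runs when the request truly ends */
};

static void deferred_free_cmpl(struct req *r)
{
        kfree(r->buf);                  /* safe now: hardware is finished */
        r->buf = NULL;
}

static int issue_and_wait(struct req *r, int (*wait_fn)(struct req *))
{
        int rc = wait_fn(r);

        if (rc == -ETIMEDOUT) {
                /* hand ownership to the late completion instead of
                 * freeing a buffer the hardware might still touch */
                r->done = deferred_free_cmpl;
                return rc;
        }
        kfree(r->buf);                  /* normal path: free immediately */
        r->buf = NULL;
        return rc;
}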
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
unsigned tgt_id, unsigned int lun,
@ -706,8 +739,9 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
&phba->sli.ring[phba->sli.fcp_ring],
iocbq, iocbqrsp, lpfc_cmd->timeout);
if (ret != IOCB_SUCCESS) {
if (ret == IOCB_TIMEDOUT)
iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
ret = FAILED;
} else {
ret = SUCCESS;
lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
@ -974,7 +1008,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
}
static int
lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
@ -984,6 +1018,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
struct lpfc_nodelist *pnode = rdata->pnode;
uint32_t cmd_result = 0, cmd_status = 0;
int ret = FAILED;
int iocb_status = IOCB_SUCCESS;
int cnt, loopcnt;
lpfc_block_error_handler(cmnd);
@ -995,7 +1030,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
*/
while ( 1 ) {
if (!pnode)
return FAILED;
goto out;
if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
spin_unlock_irq(phba->host->host_lock);
@ -1013,7 +1048,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
}
pnode = rdata->pnode;
if (!pnode)
return FAILED;
goto out;
}
if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
break;
@ -1028,7 +1063,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
lpfc_cmd->rdata = rdata;
ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun,
FCP_LUN_RESET);
FCP_TARGET_RESET);
if (!ret)
goto out_free_scsi_buf;
@ -1040,16 +1075,21 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
goto out_free_scsi_buf;
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
"%d:0703 Issue LUN Reset to TGT %d LUN %d "
"Data: x%x x%x\n", phba->brd_no, cmnd->device->id,
"%d:0703 Issue target reset to TGT %d LUN %d rpi x%x "
"nlp_flag x%x\n", phba->brd_no, cmnd->device->id,
cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
ret = lpfc_sli_issue_iocb_wait(phba,
iocb_status = lpfc_sli_issue_iocb_wait(phba,
&phba->sli.ring[phba->sli.fcp_ring],
iocbq, iocbqrsp, lpfc_cmd->timeout);
if (ret == IOCB_SUCCESS)
ret = SUCCESS;
if (iocb_status == IOCB_TIMEDOUT)
iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
if (iocb_status == IOCB_SUCCESS)
ret = SUCCESS;
else
ret = iocb_status;
cmd_result = iocbqrsp->iocb.un.ulpWord[4];
cmd_status = iocbqrsp->iocb.ulpStatus;
@ -1087,18 +1127,19 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
if (cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d:0719 LUN Reset I/O flush failure: cnt x%x\n",
"%d:0719 device reset I/O flush failure: cnt x%x\n",
phba->brd_no, cnt);
ret = FAILED;
}
out_free_scsi_buf:
lpfc_release_scsi_buf(phba, lpfc_cmd);
if (iocb_status != IOCB_TIMEDOUT) {
lpfc_release_scsi_buf(phba, lpfc_cmd);
}
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d:0713 SCSI layer issued LUN reset (%d, %d) "
"Data: x%x x%x x%x\n",
phba->brd_no, cmnd->device->id,cmnd->device->lun,
"%d:0713 SCSI layer issued device reset (%d, %d) "
"return x%x status x%x result x%x\n",
phba->brd_no, cmnd->device->id, cmnd->device->lun,
ret, cmd_status, cmd_result);
out:
@ -1107,7 +1148,7 @@ out:
}
static int
lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
@ -1134,10 +1175,12 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
* fail, this routine returns failure to the midlayer.
*/
for (i = 0; i < LPFC_MAX_TARGET; i++) {
/* Search the mapped list for this target ID */
/* Search for mapped node by target ID */
match = 0;
list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
if ((i == ndlp->nlp_sid) && ndlp->rport) {
list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
i == ndlp->nlp_sid &&
ndlp->rport) {
match = 1;
break;
}
@ -1152,13 +1195,17 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
"%d:0700 Bus Reset on target %d failed\n",
phba->brd_no, i);
err_count++;
break;
}
}
if (ret != IOCB_TIMEDOUT)
lpfc_release_scsi_buf(phba, lpfc_cmd);
if (err_count == 0)
ret = SUCCESS;
lpfc_release_scsi_buf(phba, lpfc_cmd);
else
ret = FAILED;
/*
* All outstanding txcmplq I/Os should have been aborted by
@ -1299,11 +1346,13 @@ struct scsi_host_template lpfc_template = {
.info = lpfc_info,
.queuecommand = lpfc_queuecommand,
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler= lpfc_reset_lun_handler,
.eh_bus_reset_handler = lpfc_reset_bus_handler,
.eh_device_reset_handler= lpfc_device_reset_handler,
.eh_bus_reset_handler = lpfc_bus_reset_handler,
.slave_alloc = lpfc_slave_alloc,
.slave_configure = lpfc_slave_configure,
.slave_destroy = lpfc_slave_destroy,
.scan_finished = lpfc_scan_finished,
.scan_start = lpfc_scan_start,
.this_id = -1,
.sg_tablesize = LPFC_SG_SEG_CNT,
.cmd_per_lun = LPFC_CMD_PER_LUN,


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* Copyright (C) 2004-2007 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -528,6 +528,7 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
* If pdone_q is empty, the driver thread gave up waiting and
* continued running.
*/
pmboxq->mbox_flag |= LPFC_MBX_WAKE;
pdone_q = (wait_queue_head_t *) pmboxq->context1;
if (pdone_q)
wake_up_interruptible(pdone_q);
@ -538,11 +539,32 @@ void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
struct lpfc_dmabuf *mp;
uint16_t rpi;
int rc;
mp = (struct lpfc_dmabuf *) (pmb->context1);
if (mp) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
/*
* If a REG_LOGIN succeeded after the node was destroyed or the node
* is in re-discovery, the driver needs to clean up the RPI.
*/
if (!(phba->fc_flag & FC_UNLOADING) &&
(pmb->mb.mbxCommand == MBX_REG_LOGIN64) &&
(!pmb->mb.mbxStatus)) {
rpi = pmb->mb.un.varWords[0];
lpfc_unreg_login(phba, rpi, pmb);
pmb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc != MBX_NOT_FINISHED)
return;
}
mempool_free( pmb, phba->mbox_mem_pool);
return;
}
@ -693,25 +715,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
} else {
spin_unlock_irq(phba->host->host_lock);
/* Turn on IOCB processing */
for (i = 0; i < phba->sli.num_rings; i++) {
for (i = 0; i < phba->sli.num_rings; i++)
lpfc_sli_turn_on_ring(phba, i);
}
/* Free any lpfc_dmabuf's waiting for mbox cmd cmpls */
while (!list_empty(&phba->freebufList)) {
struct lpfc_dmabuf *mp;
mp = NULL;
list_remove_head((&phba->freebufList),
mp,
struct lpfc_dmabuf,
list);
if (mp) {
lpfc_mbuf_free(phba, mp->virt,
mp->phys);
kfree(mp);
}
}
}
} while (process_next);
@ -833,6 +838,14 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
* All other are passed to the completion callback.
*/
if (pring->ringno == LPFC_ELS_RING) {
if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
cmdiocbp->iocb_flag &=
~LPFC_DRIVER_ABORTED;
saveq->iocb.ulpStatus =
IOSTAT_LOCAL_REJECT;
saveq->iocb.un.ulpWord[4] =
IOERR_SLI_ABORTED;
}
spin_unlock_irqrestore(phba->host->host_lock,
iflag);
(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
@ -1464,8 +1477,9 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
int
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
LIST_HEAD(completions);
struct lpfc_iocbq *iocb, *next_iocb;
IOCB_t *icmd = NULL, *cmd = NULL;
IOCB_t *cmd = NULL;
int errcnt;
errcnt = 0;
@ -1474,46 +1488,28 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
* First do the txq.
*/
spin_lock_irq(phba->host->host_lock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
list_del_init(&iocb->list);
if (iocb->iocb_cmpl) {
icmd = &iocb->iocb;
icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
spin_unlock_irq(phba->host->host_lock);
(iocb->iocb_cmpl) (phba, iocb, iocb);
spin_lock_irq(phba->host->host_lock);
} else
lpfc_sli_release_iocbq(phba, iocb);
}
list_splice_init(&pring->txq, &completions);
pring->txq_cnt = 0;
INIT_LIST_HEAD(&(pring->txq));
/* Next issue ABTS for everything on the txcmplq */
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
lpfc_sli_issue_abort_iotag(phba, pring, iocb);
spin_unlock_irq(phba->host->host_lock);
while (!list_empty(&completions)) {
iocb = list_get_first(&completions, struct lpfc_iocbq, list);
cmd = &iocb->iocb;
/*
* Immediate abort of IOCB, dequeue and call completion
*/
list_del_init(&iocb->list);
pring->txcmplq_cnt--;
list_del(&iocb->list);
if (iocb->iocb_cmpl) {
cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
spin_unlock_irq(phba->host->host_lock);
(iocb->iocb_cmpl) (phba, iocb, iocb);
spin_lock_irq(phba->host->host_lock);
} else
lpfc_sli_release_iocbq(phba, iocb);
}
INIT_LIST_HEAD(&pring->txcmplq);
pring->txcmplq_cnt = 0;
spin_unlock_irq(phba->host->host_lock);
return errcnt;
}
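The rewritten lpfc_sli_abort_iocb_ring splices the entire txq onto a
private list under the host lock and runs the completions only after the
lock is dropped, instead of unlocking and relocking around every callback.
A minimal sketch of the splice-and-complete idiom, names illustrative:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct cmd {
        struct list_head list;
        int status;
        void (*done)(struct cmd *);
};

static void fail_all(struct list_head *txq, spinlock_t *lock)
{
        LIST_HEAD(completions);
        struct cmd *c, *tmp;

        spin_lock_irq(lock);
        list_splice_init(txq, &completions);    /* txq is now empty */
        spin_unlock_irq(lock);

        list_for_each_entry_safe(c, tmp, &completions, list) {
                list_del(&c->list);
                c->status = -EIO;               /* locally rejected */
                if (c->done)
                        c->done(c);             /* lock not held here */
        }
}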
@ -1588,6 +1584,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba)
hc_copy = readl(phba->HCregaddr);
writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
phba->fc_flag |= FC_IGNORE_ERATT;
if (readl(phba->HAregaddr) & HA_ERATT) {
/* Clear Chip error bit */
@ -1630,6 +1627,7 @@ clear_errat:
}
restore_hc:
phba->fc_flag &= ~FC_IGNORE_ERATT;
writel(hc_copy, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
}
@ -1665,6 +1663,7 @@ lpfc_sli_brdkill(struct lpfc_hba * phba)
status &= ~HC_ERINT_ENA;
writel(status, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
phba->fc_flag |= FC_IGNORE_ERATT;
spin_unlock_irq(phba->host->host_lock);
lpfc_kill_board(phba, pmb);
@ -1674,6 +1673,9 @@ lpfc_sli_brdkill(struct lpfc_hba * phba)
if (retval != MBX_SUCCESS) {
if (retval != MBX_BUSY)
mempool_free(pmb, phba->mbox_mem_pool);
spin_lock_irq(phba->host->host_lock);
phba->fc_flag &= ~FC_IGNORE_ERATT;
spin_unlock_irq(phba->host->host_lock);
return 1;
}
@ -1700,6 +1702,7 @@ lpfc_sli_brdkill(struct lpfc_hba * phba)
}
spin_lock_irq(phba->host->host_lock);
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
phba->fc_flag &= ~FC_IGNORE_ERATT;
spin_unlock_irq(phba->host->host_lock);
psli->mbox_active = NULL;
@ -1985,42 +1988,6 @@ lpfc_sli_hba_setup_exit:
return rc;
}
static void
lpfc_mbox_abort(struct lpfc_hba * phba)
{
LPFC_MBOXQ_t *pmbox;
MAILBOX_t *mb;
if (phba->sli.mbox_active) {
del_timer_sync(&phba->sli.mbox_tmo);
phba->work_hba_events &= ~WORKER_MBOX_TMO;
pmbox = phba->sli.mbox_active;
mb = &pmbox->mb;
phba->sli.mbox_active = NULL;
if (pmbox->mbox_cmpl) {
mb->mbxStatus = MBX_NOT_FINISHED;
(pmbox->mbox_cmpl) (phba, pmbox);
}
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
}
/* Abort all the non active mailbox commands. */
spin_lock_irq(phba->host->host_lock);
pmbox = lpfc_mbox_get(phba);
while (pmbox) {
mb = &pmbox->mb;
if (pmbox->mbox_cmpl) {
mb->mbxStatus = MBX_NOT_FINISHED;
spin_unlock_irq(phba->host->host_lock);
(pmbox->mbox_cmpl) (phba, pmbox);
spin_lock_irq(phba->host->host_lock);
}
pmbox = lpfc_mbox_get(phba);
}
spin_unlock_irq(phba->host->host_lock);
return;
}
/*! lpfc_mbox_timeout
*
* \pre
@ -2055,6 +2022,8 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *pmbox;
MAILBOX_t *mb;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
spin_lock_irq(phba->host->host_lock);
if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
@ -2062,8 +2031,6 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
return;
}
phba->work_hba_events &= ~WORKER_MBOX_TMO;
pmbox = phba->sli.mbox_active;
mb = &pmbox->mb;
@ -2078,17 +2045,32 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
phba->sli.sli_flag,
phba->sli.mbox_active);
phba->sli.mbox_active = NULL;
if (pmbox->mbox_cmpl) {
mb->mbxStatus = MBX_NOT_FINISHED;
spin_unlock_irq(phba->host->host_lock);
(pmbox->mbox_cmpl) (phba, pmbox);
spin_lock_irq(phba->host->host_lock);
}
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
/* Setting state unknown so lpfc_sli_abort_iocb_ring
* would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
* it to fail all outstanding SCSI IO.
*/
phba->hba_state = LPFC_STATE_UNKNOWN;
phba->work_hba_events &= ~WORKER_MBOX_TMO;
phba->fc_flag |= FC_ESTABLISH_LINK;
psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
spin_unlock_irq(phba->host->host_lock);
lpfc_mbox_abort(phba);
pring = &psli->ring[psli->fcp_ring];
lpfc_sli_abort_iocb_ring(phba, pring);
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"%d:0316 Resetting board due to mailbox timeout\n",
phba->brd_no);
/*
* lpfc_offline calls lpfc_sli_hba_down which will clean up
* outstanding mailbox commands.
*/
lpfc_offline_prep(phba);
lpfc_offline(phba);
lpfc_sli_brdrestart(phba);
if (lpfc_online(phba) == 0) /* Initialize the HBA */
mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
lpfc_unblock_mgmt_io(phba);
return;
}
@ -2320,9 +2302,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
spin_unlock_irqrestore(phba->host->host_lock,
drvr_flag);
/* Can be in interrupt context, do not sleep */
/* (or might be called with interrupts disabled) */
mdelay(1);
msleep(1);
spin_lock_irqsave(phba->host->host_lock, drvr_flag);
@ -2430,7 +2410,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) {
/*
* Only CREATE_XRI, CLOSE_XRI, ABORT_XRI, and QUE_RING_BUF
* Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
* can be issued if the link is not up.
*/
switch (piocb->iocb.ulpCommand) {
@ -2444,6 +2424,8 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
piocb->iocb_cmpl = NULL;
/*FALLTHROUGH*/
case CMD_CREATE_XRI_CR:
case CMD_CLOSE_XRI_CN:
case CMD_CLOSE_XRI_CX:
break;
default:
goto iocb_busy;
@ -2637,11 +2619,12 @@ lpfc_sli_queue_setup(struct lpfc_hba * phba)
int
lpfc_sli_hba_down(struct lpfc_hba * phba)
{
LIST_HEAD(completions);
struct lpfc_sli *psli;
struct lpfc_sli_ring *pring;
LPFC_MBOXQ_t *pmb;
struct lpfc_iocbq *iocb, *next_iocb;
IOCB_t *icmd = NULL;
struct lpfc_iocbq *iocb;
IOCB_t *cmd = NULL;
int i;
unsigned long flags = 0;
@ -2649,7 +2632,6 @@ lpfc_sli_hba_down(struct lpfc_hba * phba)
lpfc_hba_down_prep(phba);
spin_lock_irqsave(phba->host->host_lock, flags);
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
pring->flag |= LPFC_DEFERRED_RING_EVENT;
@ -2658,28 +2640,25 @@ lpfc_sli_hba_down(struct lpfc_hba * phba)
* Error everything on the txq since these iocbs have not been
* given to the FW yet.
*/
list_splice_init(&pring->txq, &completions);
pring->txq_cnt = 0;
list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
list_del_init(&iocb->list);
if (iocb->iocb_cmpl) {
icmd = &iocb->iocb;
icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
icmd->un.ulpWord[4] = IOERR_SLI_DOWN;
spin_unlock_irqrestore(phba->host->host_lock,
flags);
(iocb->iocb_cmpl) (phba, iocb, iocb);
spin_lock_irqsave(phba->host->host_lock, flags);
} else
lpfc_sli_release_iocbq(phba, iocb);
}
INIT_LIST_HEAD(&(pring->txq));
}
spin_unlock_irqrestore(phba->host->host_lock, flags);
while (!list_empty(&completions)) {
iocb = list_get_first(&completions, struct lpfc_iocbq, list);
cmd = &iocb->iocb;
list_del(&iocb->list);
if (iocb->iocb_cmpl) {
cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
(iocb->iocb_cmpl) (phba, iocb, iocb);
} else
lpfc_sli_release_iocbq(phba, iocb);
}
/* Return any active mbox cmds */
del_timer_sync(&psli->mbox_tmo);
spin_lock_irqsave(phba->host->host_lock, flags);
@ -2768,85 +2747,138 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
static void
lpfc_sli_abort_elsreq_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
struct lpfc_iocbq * rspiocb)
lpfc_sli_abort_els_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
struct lpfc_iocbq * rspiocb)
{
struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
/* Free the resources associated with the ELS_REQUEST64 IOCB the driver
* just aborted.
* In this case, context2 = cmd, context2->next = rsp, context3 = bpl
*/
if (cmdiocb->context2) {
buf_ptr1 = (struct lpfc_dmabuf *) cmdiocb->context2;
IOCB_t *irsp;
uint16_t abort_iotag, abort_context;
struct lpfc_iocbq *abort_iocb, *rsp_ab_iocb;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
/* Free the response IOCB before completing the abort
command. */
buf_ptr = NULL;
list_remove_head((&buf_ptr1->list), buf_ptr,
struct lpfc_dmabuf, list);
if (buf_ptr) {
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
kfree(buf_ptr);
abort_iocb = NULL;
irsp = &rspiocb->iocb;
spin_lock_irq(phba->host->host_lock);
if (irsp->ulpStatus) {
abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"%d:0327 Cannot abort els iocb %p"
" with tag %x context %x\n",
phba->brd_no, abort_iocb,
abort_iotag, abort_context);
/*
* make sure we have the right iocbq before taking it
* off the txcmplq and trying to call the completion routine.
*/
if (abort_iocb &&
abort_iocb->iocb.ulpContext == abort_context &&
abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
list_del(&abort_iocb->list);
pring->txcmplq_cnt--;
rsp_ab_iocb = lpfc_sli_get_iocbq(phba);
if (rsp_ab_iocb == NULL)
lpfc_sli_release_iocbq(phba, abort_iocb);
else {
abort_iocb->iocb_flag &=
~LPFC_DRIVER_ABORTED;
rsp_ab_iocb->iocb.ulpStatus =
IOSTAT_LOCAL_REJECT;
rsp_ab_iocb->iocb.un.ulpWord[4] =
IOERR_SLI_ABORTED;
spin_unlock_irq(phba->host->host_lock);
(abort_iocb->iocb_cmpl)
(phba, abort_iocb, rsp_ab_iocb);
spin_lock_irq(phba->host->host_lock);
lpfc_sli_release_iocbq(phba, rsp_ab_iocb);
}
}
lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
kfree(buf_ptr1);
}
if (cmdiocb->context3) {
buf_ptr = (struct lpfc_dmabuf *) cmdiocb->context3;
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
kfree(buf_ptr);
}
lpfc_sli_release_iocbq(phba, cmdiocb);
spin_unlock_irq(phba->host->host_lock);
return;
}
int
lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba,
struct lpfc_sli_ring * pring,
struct lpfc_iocbq * cmdiocb)
lpfc_sli_issue_abort_iotag(struct lpfc_hba * phba,
struct lpfc_sli_ring * pring,
struct lpfc_iocbq * cmdiocb)
{
struct lpfc_iocbq *abtsiocbp;
IOCB_t *icmd = NULL;
IOCB_t *iabt = NULL;
int retval = IOCB_ERROR;
/* There are certain command types we don't want
* to abort.
*/
icmd = &cmdiocb->iocb;
if ((icmd->ulpCommand == CMD_ABORT_XRI_CN) ||
(icmd->ulpCommand == CMD_CLOSE_XRI_CN))
return 0;
/* If we're unloading, interrupts are disabled so we
* need to clean up the iocb here.
*/
if (phba->fc_flag & FC_UNLOADING)
goto abort_iotag_exit;
/* issue ABTS for this IOCB based on iotag */
abtsiocbp = lpfc_sli_get_iocbq(phba);
if (abtsiocbp == NULL)
return 0;
/* This signals the response to set the correct status
* before calling the completion handler.
*/
cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
iabt = &abtsiocbp->iocb;
icmd = &cmdiocb->iocb;
switch (icmd->ulpCommand) {
case CMD_ELS_REQUEST64_CR:
/* Even though we abort the ELS command, the firmware may access
* the BPL or other resources before it processes our
* ABORT_MXRI64. Thus we must delay reusing the cmdiocb
* resources till the actual abort request completes.
*/
abtsiocbp->context1 = (void *)((unsigned long)icmd->ulpCommand);
abtsiocbp->context2 = cmdiocb->context2;
abtsiocbp->context3 = cmdiocb->context3;
cmdiocb->context2 = NULL;
cmdiocb->context3 = NULL;
abtsiocbp->iocb_cmpl = lpfc_sli_abort_elsreq_cmpl;
break;
default:
lpfc_sli_release_iocbq(phba, abtsiocbp);
return 0;
}
iabt->un.amxri.abortType = ABORT_TYPE_ABTS;
iabt->un.amxri.iotag32 = icmd->un.elsreq64.bdl.ulpIoTag32;
iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
iabt->un.acxri.abortContextTag = icmd->ulpContext;
iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
iabt->ulpLe = 1;
iabt->ulpClass = CLASS3;
iabt->ulpCommand = CMD_ABORT_MXRI64_CN;
iabt->ulpClass = icmd->ulpClass;
if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) == IOCB_ERROR) {
lpfc_sli_release_iocbq(phba, abtsiocbp);
return 0;
if (phba->hba_state >= LPFC_LINK_UP)
iabt->ulpCommand = CMD_ABORT_XRI_CN;
else
iabt->ulpCommand = CMD_CLOSE_XRI_CN;
abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"%d:0339 Abort xri x%x, original iotag x%x, abort "
"cmd iotag x%x\n",
phba->brd_no, iabt->un.acxri.abortContextTag,
iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
retval = lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
abort_iotag_exit:
/* If we could not issue an abort, dequeue the iocb and handle
* the completion here.
*/
if (retval == IOCB_ERROR) {
list_del(&cmdiocb->list);
pring->txcmplq_cnt--;
if (cmdiocb->iocb_cmpl) {
icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
spin_unlock_irq(phba->host->host_lock);
(cmdiocb->iocb_cmpl) (phba, cmdiocb, cmdiocb);
spin_lock_irq(phba->host->host_lock);
} else
lpfc_sli_release_iocbq(phba, cmdiocb);
}
return 1;
@ -2918,9 +2950,11 @@ void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
struct lpfc_iocbq * rspiocb)
{
spin_lock_irq(phba->host->host_lock);
unsigned long iflags;
spin_lock_irqsave(phba->host->host_lock, iflags);
lpfc_sli_release_iocbq(phba, cmdiocb);
spin_unlock_irq(phba->host->host_lock);
spin_unlock_irqrestore(phba->host->host_lock, iflags);
return;
}
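The completion above now saves and restores the interrupt state instead of
unconditionally re-enabling interrupts on unlock, which matters when the
caller may already hold interrupts disabled. A short sketch of the
difference, assuming only the standard spinlock API:

#include <linux/spinlock.h>

static void safe_in_any_context(spinlock_t *lock)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);         /* records irq state */
        /* ... touch shared state ... */
        spin_unlock_irqrestore(lock, flags);    /* restores prior state */
        /* spin_unlock_irq() here would force interrupts back on even
         * if the caller had them disabled */
}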
@ -3043,22 +3077,22 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
timeout_req);
spin_lock_irq(phba->host->host_lock);
if (timeleft == 0) {
if (piocb->iocb_flag & LPFC_IO_WAKE) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"%d:0331 IOCB wake signaled\n",
phba->brd_no);
} else if (timeleft == 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"%d:0338 IOCB wait timeout error - no "
"wake response Data x%x\n",
phba->brd_no, timeout);
retval = IOCB_TIMEDOUT;
} else if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"%d:0330 IOCB wake NOT set, "
"Data x%x x%lx\n", phba->brd_no,
timeout, (timeleft / jiffies));
retval = IOCB_TIMEDOUT;
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"%d:0331 IOCB wake signaled\n",
phba->brd_no);
}
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@ -3087,8 +3121,6 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
uint32_t timeout)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
DECLARE_WAITQUEUE(wq_entry, current);
uint32_t timeleft = 0;
int retval;
/* The caller must leave context1 empty. */
@ -3101,27 +3133,25 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
/* setup context field to pass wait_queue pointer to wake function */
pmboxq->context1 = &done_q;
/* start to sleep before we wait, to avoid races */
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&done_q, &wq_entry);
/* now issue the command */
retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
timeleft = schedule_timeout(timeout * HZ);
wait_event_interruptible_timeout(done_q,
pmboxq->mbox_flag & LPFC_MBX_WAKE,
timeout * HZ);
pmboxq->context1 = NULL;
/* if schedule_timeout returns 0, we timed out and were not
woken up */
if ((timeleft == 0) || signal_pending(current))
retval = MBX_TIMEOUT;
else
/*
* If the LPFC_MBX_WAKE flag is set, the mailbox completed;
* otherwise do not free the resources.
*/
if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
retval = MBX_SUCCESS;
else
retval = MBX_TIMEOUT;
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&done_q, &wq_entry);
return retval;
}
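The rewritten wait path pairs a flag (LPFC_MBX_WAKE) with
wait_event_interruptible_timeout(): the completion sets the flag before
waking, and the waiter trusts the flag rather than the wakeup itself, so a
completion racing with the timeout still counts as success. A sketch of
that handshake with invented names (struct mbox_req, req_wait):

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct mbox_req {
        wait_queue_head_t *waitq;       /* set only while someone waits */
        unsigned int flags;
#define REQ_WAKE 1
};

static void req_complete(struct mbox_req *r)    /* completion side */
{
        r->flags |= REQ_WAKE;           /* flag first, then wake */
        if (r->waitq)
                wake_up_interruptible(r->waitq);
}

static int req_wait(struct mbox_req *r, long timeout)
{
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);

        r->waitq = &done_q;
        wait_event_interruptible_timeout(done_q, r->flags & REQ_WAKE,
                                         timeout);
        r->waitq = NULL;
        /* the flag, not the wakeup, decides the outcome */
        return (r->flags & REQ_WAKE) ? 0 : -ETIMEDOUT;
}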
@ -3184,6 +3214,11 @@ lpfc_intr_handler(int irq, void *dev_id)
*/
spin_lock(phba->host->host_lock);
ha_copy = readl(phba->HAregaddr);
/* If somebody is waiting to handle an eratt, don't process it
* here. The brdkill function will do this.
*/
if (phba->fc_flag & FC_IGNORE_ERATT)
ha_copy &= ~HA_ERATT;
writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
spin_unlock(phba->host->host_lock);


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* Copyright (C) 2004-2007 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -39,9 +39,10 @@ struct lpfc_iocbq {
IOCB_t iocb; /* IOCB cmd */
uint8_t retry; /* retry counter for IOCB cmd - if needed */
uint8_t iocb_flag;
#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */
#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */
#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */
#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */
#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
uint8_t abort_count;
uint8_t rsvd2;
@ -67,6 +68,8 @@ struct lpfc_iocbq {
#define IOCB_ERROR 2
#define IOCB_TIMEDOUT 3
#define LPFC_MBX_WAKE 1
typedef struct lpfcMboxq {
/* MBOXQs are used in single linked lists */
struct list_head list; /* ptr to next mailbox command */
@ -75,6 +78,7 @@ typedef struct lpfcMboxq {
void *context2; /* caller context information */
void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
uint8_t mbox_flag;
} LPFC_MBOXQ_t;


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* Copyright (C) 2004-2007 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -18,12 +18,12 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "8.1.11"
#define LPFC_DRIVER_VERSION "8.1.12"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
#define LPFC_COPYRIGHT "Copyright(c) 2004-2006 Emulex. All rights reserved."
#define LPFC_COPYRIGHT "Copyright(c) 2004-2007 Emulex. All rights reserved."
#define DFC_API_VERSION "0.0.0"


@ -1754,7 +1754,8 @@ __mega_busywait_mbox (adapter_t *adapter)
for (counter = 0; counter < 10000; counter++) {
if (!mbox->m_in.busy)
return 0;
udelay(100); yield();
udelay(100);
cond_resched();
}
return -1; /* give up after 1 second */
}
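cond_resched() yields only when another task is due to run, whereas the
yield() it replaces unconditionally pushed the poller to the back of the
runqueue, so the one-second busy-wait budget stays predictable. A sketch
of the resulting polling idiom, with an illustrative poll_busy helper:

#include <linux/delay.h>
#include <linux/sched.h>

static int poll_busy(volatile int *busy)
{
        int i;

        for (i = 0; i < 10000; i++) {
                if (!*busy)
                        return 0;
                udelay(100);            /* 10000 * 100us ~= 1 second */
                cond_resched();         /* yield only if someone needs us */
        }
        return -1;                      /* give up after the budget */
}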
@ -3177,7 +3178,10 @@ proc_rdrv(adapter_t *adapter, char *page, int start, int end )
return len;
}
#else
static inline void mega_create_proc_entry(int index, struct proc_dir_entry *parent)
{
}
#endif
@ -4342,7 +4346,7 @@ mega_support_cluster(adapter_t *adapter)
return 0;
}
#ifdef CONFIG_PROC_FS
/**
* mega_adapinq()
* @adapter - pointer to our soft state
@ -4447,7 +4451,7 @@ mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
return rval;
}
#endif
/**
* mega_internal_command()
@ -4965,7 +4969,6 @@ megaraid_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
adapter_t *adapter = (adapter_t *)host->hostdata;
char buf[12] = { 0 };
scsi_remove_host(host);
@ -5011,8 +5014,11 @@ megaraid_remove_one(struct pci_dev *pdev)
remove_proc_entry("raiddrives-30-39",
adapter->controller_proc_dir_entry);
#endif
sprintf(buf, "hba%d", adapter->host->host_no);
remove_proc_entry(buf, mega_proc_dir_entry);
{
char buf[12] = { 0 };
sprintf(buf, "hba%d", adapter->host->host_no);
remove_proc_entry(buf, mega_proc_dir_entry);
}
}
#endif


@ -1002,7 +1002,6 @@ static int megaraid_reset(Scsi_Cmnd *);
static int megaraid_abort_and_reset(adapter_t *, Scsi_Cmnd *, int);
static int megaraid_biosparam(struct scsi_device *, struct block_device *,
sector_t, int []);
static int mega_print_inquiry(char *, char *);
static int mega_build_sglist (adapter_t *adapter, scb_t *scb,
u32 *buffer, u32 *length);
@ -1024,6 +1023,7 @@ static int mega_init_scb (adapter_t *);
static int mega_is_bios_enabled (adapter_t *);
#ifdef CONFIG_PROC_FS
static int mega_print_inquiry(char *, char *);
static void mega_create_proc_entry(int, struct proc_dir_entry *);
static int proc_read_config(char *, char **, off_t, int, int *, void *);
static int proc_read_stat(char *, char **, off_t, int, int *, void *);
@ -1040,10 +1040,10 @@ static int proc_rdrv_20(char *, char **, off_t, int, int *, void *);
static int proc_rdrv_30(char *, char **, off_t, int, int *, void *);
static int proc_rdrv_40(char *, char **, off_t, int, int *, void *);
static int proc_rdrv(adapter_t *, char *, int, int);
#endif
static int mega_adapinq(adapter_t *, dma_addr_t);
static int mega_internal_dev_inquiry(adapter_t *, u8, u8, dma_addr_t);
#endif
static int mega_support_ext_cdb(adapter_t *);
static mega_passthru* mega_prepare_passthru(adapter_t *, scb_t *,


@ -60,7 +60,7 @@ EXPORT_SYMBOL(mraid_mm_unregister_adp);
EXPORT_SYMBOL(mraid_mm_adapter_app_handle);
static int majorno;
static uint32_t drvr_ver = 0x02200206;
static uint32_t drvr_ver = 0x02200207;
static int adapters_count_g;
static struct list_head adapters_list_g;


@ -185,7 +185,7 @@ struct mesh_state {
* Driver is too messy, we need a few prototypes...
*/
static void mesh_done(struct mesh_state *ms, int start_next);
static void mesh_interrupt(int irq, void *dev_id);
static void mesh_interrupt(struct mesh_state *ms);
static void cmd_complete(struct mesh_state *ms);
static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd);
static void halt_dma(struct mesh_state *ms);
@ -466,7 +466,7 @@ static void mesh_start_cmd(struct mesh_state *ms, struct scsi_cmnd *cmd)
dlog(ms, "intr b4 arb, intr/exc/err/fc=%.8x",
MKWORD(mr->interrupt, mr->exception,
mr->error, mr->fifo_count));
mesh_interrupt(0, (void *)ms);
mesh_interrupt(ms);
if (ms->phase != arbitrating)
return;
}
@ -504,7 +504,7 @@ static void mesh_start_cmd(struct mesh_state *ms, struct scsi_cmnd *cmd)
dlog(ms, "intr after disresel, intr/exc/err/fc=%.8x",
MKWORD(mr->interrupt, mr->exception,
mr->error, mr->fifo_count));
mesh_interrupt(0, (void *)ms);
mesh_interrupt(ms);
if (ms->phase != arbitrating)
return;
dlog(ms, "after intr after disresel, intr/exc/err/fc=%.8x",
@ -1018,10 +1018,11 @@ static void handle_reset(struct mesh_state *ms)
static irqreturn_t do_mesh_interrupt(int irq, void *dev_id)
{
unsigned long flags;
struct Scsi_Host *dev = ((struct mesh_state *)dev_id)->host;
struct mesh_state *ms = dev_id;
struct Scsi_Host *dev = ms->host;
spin_lock_irqsave(dev->host_lock, flags);
mesh_interrupt(irq, dev_id);
mesh_interrupt(ms);
spin_unlock_irqrestore(dev->host_lock, flags);
return IRQ_HANDLED;
}
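The mesh changes split the interrupt path into a registered wrapper that
recovers the typed state and takes the host lock, and a core routine that
takes the state pointer directly, so internal callers no longer fake an
(irq, dev_id) pair. A generic sketch of that wrapper pattern, names
illustrative:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct dev_state {
        spinlock_t lock;
        /* ... device registers, queues ... */
};

static void core_interrupt(struct dev_state *st)
{
        /* real interrupt work; expects st->lock to be held, and can be
         * called directly from driver paths that already hold it */
}

static irqreturn_t irq_entry(int irq, void *dev_id)
{
        struct dev_state *st = dev_id;
        unsigned long flags;

        spin_lock_irqsave(&st->lock, flags);
        core_interrupt(st);
        spin_unlock_irqrestore(&st->lock, flags);
        return IRQ_HANDLED;
}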
@ -1661,9 +1662,8 @@ static int mesh_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
* handler (do_mesh_interrupt) or by other functions in
* exceptional circumstances
*/
static void mesh_interrupt(int irq, void *dev_id)
static void mesh_interrupt(struct mesh_state *ms)
{
struct mesh_state *ms = (struct mesh_state *) dev_id;
volatile struct mesh_regs __iomem *mr = ms->mesh;
int intr;


@ -4293,7 +4293,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->devnum = devnum; /* specifies microcode load address */
#ifdef QLA_64BIT_PTR
if (pci_set_dma_mask(ha->pdev, (dma_addr_t) ~ 0ULL)) {
if (pci_set_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
if (pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK)) {
printk(KERN_WARNING "scsi(%li): Unable to set a "
"suitable DMA mask - aborting\n", ha->host_no);


@ -130,18 +130,17 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
int
qla2100_pci_config(scsi_qla_host_t *ha)
{
uint16_t w, mwi;
int ret;
uint16_t w;
uint32_t d;
unsigned long flags;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
pci_set_master(ha->pdev);
mwi = 0;
if (pci_set_mwi(ha->pdev))
mwi = PCI_COMMAND_INVALIDATE;
ret = pci_set_mwi(ha->pdev);
pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
w |= mwi | (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
pci_write_config_word(ha->pdev, PCI_COMMAND, w);
/* Reset expansion ROM address decode enable */
@ -166,22 +165,22 @@ qla2100_pci_config(scsi_qla_host_t *ha)
int
qla2300_pci_config(scsi_qla_host_t *ha)
{
uint16_t w, mwi;
int ret;
uint16_t w;
uint32_t d;
unsigned long flags = 0;
uint32_t cnt;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
pci_set_master(ha->pdev);
mwi = 0;
if (pci_set_mwi(ha->pdev))
mwi = PCI_COMMAND_INVALIDATE;
ret = pci_set_mwi(ha->pdev);
pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
w |= mwi | (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
if (IS_QLA2322(ha) || IS_QLA6322(ha))
w &= ~PCI_COMMAND_INTX_DISABLE;
pci_write_config_word(ha->pdev, PCI_COMMAND, w);
/*
* If this is a 2300 card and not 2312, reset the
@ -210,7 +209,7 @@ qla2300_pci_config(scsi_qla_host_t *ha)
ha->fb_rev = RD_FB_CMD_REG(ha, reg);
if (ha->fb_rev == FPM_2300)
w &= ~PCI_COMMAND_INVALIDATE;
pci_clear_mwi(ha->pdev);
/* Deselect FPM registers. */
WRT_REG_WORD(&reg->ctrl_status, 0x0);
@ -227,7 +226,6 @@ qla2300_pci_config(scsi_qla_host_t *ha)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
pci_write_config_word(ha->pdev, PCI_COMMAND, w);
pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
@ -253,19 +251,18 @@ qla2300_pci_config(scsi_qla_host_t *ha)
int
qla24xx_pci_config(scsi_qla_host_t *ha)
{
uint16_t w, mwi;
int ret;
uint16_t w;
uint32_t d;
unsigned long flags = 0;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
int pcix_cmd_reg, pcie_dctl_reg;
pci_set_master(ha->pdev);
mwi = 0;
if (pci_set_mwi(ha->pdev))
mwi = PCI_COMMAND_INVALIDATE;
ret = pci_set_mwi(ha->pdev);
pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
w |= mwi | (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
w &= ~PCI_COMMAND_INTX_DISABLE;
pci_write_config_word(ha->pdev, PCI_COMMAND, w);
@ -3931,6 +3928,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha)
if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
return;
if (!ha->fw_major_version)
return;
ret = qla2x00_stop_firmware(ha);
for (retries = 5; ret != QLA_SUCCESS && retries ; retries--) {


@ -1726,6 +1726,17 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
qla_printk(KERN_WARNING, ha,
"MSI-X: Falling back-to INTa mode -- %d.\n", ret);
skip_msix:
if (!IS_QLA24XX(ha))
goto skip_msi;
ret = pci_enable_msi(ha->pdev);
if (!ret) {
DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
ha->flags.msi_enabled = 1;
}
skip_msi:
ret = request_irq(ha->pdev->irq, ha->isp_ops.intr_handler,
IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
if (!ret) {
@ -1746,6 +1757,8 @@ qla2x00_free_irqs(scsi_qla_host_t *ha)
if (ha->flags.msix_enabled)
qla24xx_disable_msix(ha);
else if (ha->flags.inta_enabled)
else if (ha->flags.inta_enabled) {
free_irq(ha->host->irq, ha);
pci_disable_msi(ha->pdev);
}
}
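The qla2xxx hunks add a plain-MSI fallback between MSI-X and INTx: try
pci_enable_msi(), record whether it stuck, request the irq either way, and
mirror the flag at teardown so pci_disable_msi() runs only when MSI was
enabled. A minimal sketch of that pairing, with hypothetical
setup_irq/teardown_irq helpers:

#include <linux/interrupt.h>
#include <linux/pci.h>

static int setup_irq(struct pci_dev *pdev, irq_handler_t handler,
                     void *data, int *msi_enabled)
{
        /* fall back silently to INTx if MSI cannot be enabled */
        *msi_enabled = (pci_enable_msi(pdev) == 0);
        return request_irq(pdev->irq, handler, IRQF_SHARED, "drv", data);
}

static void teardown_irq(struct pci_dev *pdev, void *data, int msi_enabled)
{
        free_irq(pdev->irq, data);
        if (msi_enabled)
                pci_disable_msi(pdev);
}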


@ -36,7 +36,7 @@ module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xlogintimeout,
"Login timeout value in seconds.");
int qlport_down_retry = 30;
int qlport_down_retry;
module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(qlport_down_retry,
"Maximum number of command retries to a port that returns "
@ -1577,9 +1577,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto probe_failed;
}
if (qla2x00_initialize_adapter(ha) &&
!(ha->device_flags & DFLG_NO_CABLE)) {
if (qla2x00_initialize_adapter(ha)) {
qla_printk(KERN_WARNING, ha,
"Failed to initialize adapter\n");

View File

@ -7,7 +7,7 @@
/*
* Driver version
*/
#define QLA2XXX_VERSION "8.01.07-k6"
#define QLA2XXX_VERSION "8.01.07-k7"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 1


@ -8,6 +8,8 @@
#include "ql4_def.h"
#include <scsi/scsi_dbg.h>
#if 0
static void qla4xxx_print_srb_info(struct srb * srb)
{
printk("%s: srb = 0x%p, flags=0x%02x\n", __func__, srb, srb->flags);
@ -195,3 +197,5 @@ void qla4xxx_dump_buffer(void *b, uint32_t size)
if (cnt % 16)
printk(KERN_DEBUG "\n");
}
#endif /* 0 */


@ -43,8 +43,6 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
uint16_t *tcp_source_port_num,
uint16_t *connection_id);
struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host * ha,
uint32_t fw_ddb_index);
int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
dma_addr_t fw_ddb_entry_dma);
@ -55,18 +53,11 @@ void qla4xxx_get_crash_record(struct scsi_qla_host * ha);
struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha);
int qla4xxx_add_sess(struct ddb_entry *);
void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry);
int qla4xxx_conn_close_sess_logout(struct scsi_qla_host * ha,
uint16_t fw_ddb_index,
uint16_t connection_id,
uint16_t option);
int qla4xxx_clear_database_entry(struct scsi_qla_host * ha,
uint16_t fw_ddb_index);
int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha);
int qla4xxx_get_fw_version(struct scsi_qla_host * ha);
void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
uint32_t intr_status);
int qla4xxx_init_rings(struct scsi_qla_host * ha);
void qla4xxx_dump_buffer(void *b, uint32_t size);
struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index);
void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb);
int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host * ha);


@ -7,9 +7,8 @@
#include "ql4_def.h"
/*
* QLogic ISP4xxx Hardware Support Function Prototypes.
*/
static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
uint32_t fw_ddb_index);
static void ql4xxx_set_mac_number(struct scsi_qla_host *ha)
{
@ -48,7 +47,8 @@ static void ql4xxx_set_mac_number(struct scsi_qla_host *ha)
* This routine deallocates and unlinks the specified ddb_entry from the
* adapter's ddb list.
**/
void qla4xxx_free_ddb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry)
static void qla4xxx_free_ddb(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry)
{
/* Remove device entry from list */
list_del_init(&ddb_entry->list);
@ -370,9 +370,9 @@ static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
* must be initialized prior to calling this routine
*
**/
int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry,
uint32_t fw_ddb_index)
static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry,
uint32_t fw_ddb_index)
{
struct dev_db_entry *fw_ddb_entry = NULL;
dma_addr_t fw_ddb_entry_dma;
@ -450,8 +450,8 @@ int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
* This routine allocates a ddb_entry, initializes some values, and
* inserts it into the ddb list.
**/
struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
uint32_t fw_ddb_index)
static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
uint32_t fw_ddb_index)
{
struct ddb_entry *ddb_entry;


@ -19,8 +19,8 @@
* - advances the request_in pointer
* - checks for queue full
**/
int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
struct queue_entry **queue_entry)
static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
struct queue_entry **queue_entry)
{
uint16_t request_in;
uint8_t status = QLA_SUCCESS;
@ -62,8 +62,8 @@ int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
*
* This routine issues a marker IOCB.
**/
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry, int lun)
static int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry, int lun)
{
struct marker_entry *marker_entry;
unsigned long flags = 0;
@ -96,7 +96,7 @@ exit_send_marker:
return status;
}
struct continuation_t1_entry* qla4xxx_alloc_cont_entry(
static struct continuation_t1_entry* qla4xxx_alloc_cont_entry(
struct scsi_qla_host *ha)
{
struct continuation_t1_entry *cont_entry;
@ -120,7 +120,7 @@ struct continuation_t1_entry* qla4xxx_alloc_cont_entry(
return cont_entry;
}
uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
static uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
{
uint16_t iocbs;
@ -133,9 +133,9 @@ uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
return iocbs;
}
void qla4xxx_build_scsi_iocbs(struct srb *srb,
struct command_t3_entry *cmd_entry,
uint16_t tot_dsds)
static void qla4xxx_build_scsi_iocbs(struct srb *srb,
struct command_t3_entry *cmd_entry,
uint16_t tot_dsds)
{
struct scsi_qla_host *ha;
uint16_t avail_dsds;


@ -20,9 +20,9 @@
* If outCount is 0, this routine completes successfully WITHOUT waiting
* for the mailbox command to complete.
**/
int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
uint8_t outCount, uint32_t *mbx_cmd,
uint32_t *mbx_sts)
static int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
uint8_t outCount, uint32_t *mbx_cmd,
uint32_t *mbx_sts)
{
int status = QLA_ERROR;
uint8_t i;
@ -170,6 +170,8 @@ mbox_exit:
}
#if 0
/**
* qla4xxx_issue_iocb - issue mailbox iocb command
* @ha: adapter state pointer.
@ -243,6 +245,8 @@ int qla4xxx_clear_database_entry(struct scsi_qla_host * ha,
return QLA_SUCCESS;
}
#endif /* 0 */
/**
* qla4xxx_initialize_fw_cb - initializes firmware control block.
* @ha: Pointer to host adapter structure.
@ -570,6 +574,7 @@ int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
return qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]);
}
#if 0
int qla4xxx_conn_open_session_login(struct scsi_qla_host * ha,
uint16_t fw_ddb_index)
{
@ -594,6 +599,7 @@ int qla4xxx_conn_open_session_login(struct scsi_qla_host * ha,
return status;
}
#endif /* 0 */
/**
* qla4xxx_get_crash_record - retrieves crash record.
@ -649,6 +655,7 @@ exit_get_crash_record:
crash_record, crash_record_dma);
}
#if 0
/**
* qla4xxx_get_conn_event_log - retrieves connection event log
* @ha: Pointer to host adapter structure.
@ -738,6 +745,7 @@ exit_get_event_log:
dma_free_coherent(&ha->pdev->dev, event_log_size, event_log,
event_log_dma);
}
#endif /* 0 */
/**
* qla4xxx_reset_lun - issues LUN Reset
@ -834,7 +842,8 @@ int qla4xxx_get_fw_version(struct scsi_qla_host * ha)
return QLA_SUCCESS;
}
int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, dma_addr_t dma_addr)
static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha,
dma_addr_t dma_addr)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
@ -855,7 +864,7 @@ int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, dma_addr_t dma_addr)
return QLA_SUCCESS;
}
int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t *ddb_index)
static int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t *ddb_index)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];


@ -14,7 +14,7 @@
/*
* Driver version
*/
char qla4xxx_version_str[40];
static char qla4xxx_version_str[40];
/*
* SRB allocation cache
@ -45,8 +45,7 @@ int ql4_mod_unload = 0;
/*
* SCSI host template entry points
*/
void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
/*
* iSCSI template entry points
@ -1352,7 +1351,7 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
* At exit, the @ha's flags.enable_64bit_addressing set to indicated
* supported addressing method.
*/
void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
{
int retval;
@ -1627,7 +1626,7 @@ static struct pci_device_id qla4xxx_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
struct pci_driver qla4xxx_pci_driver = {
static struct pci_driver qla4xxx_pci_driver = {
.name = DRIVER_NAME,
.id_table = qla4xxx_pci_tbl,
.probe = qla4xxx_probe_adapter,


@ -38,7 +38,6 @@
#include "scsi_logging.h"
#define SENSE_TIMEOUT (10*HZ)
#define START_UNIT_TIMEOUT (30*HZ)
/*
* These should *probably* be handled by the host itself.
@ -936,7 +935,7 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
START_UNIT_TIMEOUT, 0);
scmd->device->timeout, 0);
if (rtn == SUCCESS)
return 0;

View File

@ -1718,31 +1718,12 @@ fc_starget_delete(struct work_struct *work)
struct fc_rport *rport =
container_of(work, struct fc_rport, stgt_delete_work);
struct Scsi_Host *shost = rport_to_shost(rport);
unsigned long flags;
struct fc_internal *i = to_fc_internal(shost->transportt);
/*
* Involve the LLDD if possible. All io on the rport is to
* be terminated, either as part of the dev_loss_tmo callback
* processing, or via the terminate_rport_io function.
*/
if (i->f->dev_loss_tmo_callbk)
i->f->dev_loss_tmo_callbk(rport);
else if (i->f->terminate_rport_io)
/* Involve the LLDD if possible to terminate all io on the rport. */
if (i->f->terminate_rport_io)
i->f->terminate_rport_io(rport);
spin_lock_irqsave(shost->host_lock, flags);
if (rport->flags & FC_RPORT_DEVLOSS_PENDING) {
spin_unlock_irqrestore(shost->host_lock, flags);
if (!cancel_delayed_work(&rport->fail_io_work))
fc_flush_devloss(shost);
if (!cancel_delayed_work(&rport->dev_loss_work))
fc_flush_devloss(shost);
spin_lock_irqsave(shost->host_lock, flags);
rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
}
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_remove_target(&rport->dev);
}
@ -1760,6 +1741,7 @@ fc_rport_final_delete(struct work_struct *work)
struct device *dev = &rport->dev;
struct Scsi_Host *shost = rport_to_shost(rport);
struct fc_internal *i = to_fc_internal(shost->transportt);
unsigned long flags;
/*
* if a scan is pending, flush the SCSI Host work_q so that
@ -1768,13 +1750,37 @@ fc_rport_final_delete(struct work_struct *work)
if (rport->flags & FC_RPORT_SCAN_PENDING)
scsi_flush_work(shost);
/* involve the LLDD to terminate all pending i/o */
if (i->f->terminate_rport_io)
i->f->terminate_rport_io(rport);
/*
* Cancel any outstanding timers. These should really exist
* only when rmmod'ing the LLDD and we're asking for
* immediate termination of the rports.
*/
spin_lock_irqsave(shost->host_lock, flags);
if (rport->flags & FC_RPORT_DEVLOSS_PENDING) {
spin_unlock_irqrestore(shost->host_lock, flags);
if (!cancel_delayed_work(&rport->fail_io_work))
fc_flush_devloss(shost);
if (!cancel_delayed_work(&rport->dev_loss_work))
fc_flush_devloss(shost);
spin_lock_irqsave(shost->host_lock, flags);
rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
}
spin_unlock_irqrestore(shost->host_lock, flags);
/* Delete SCSI target and sdevs */
if (rport->scsi_target_id != -1)
fc_starget_delete(&rport->stgt_delete_work);
else if (i->f->dev_loss_tmo_callbk)
/*
* Notify the driver that the rport is now dead. The LLDD will
* also guarantee that any communication to the rport is terminated.
*/
if (i->f->dev_loss_tmo_callbk)
i->f->dev_loss_tmo_callbk(rport);
else if (i->f->terminate_rport_io)
i->f->terminate_rport_io(rport);
transport_remove_device(dev);
device_del(dev);
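A hedged sketch of the LLDD side of these two hooks; the field names come
from struct fc_function_template, while the handler names and bodies are
illustrative only.

static void example_terminate_rport_io(struct fc_rport *rport)
{
	/* abort or fail all I/O still outstanding on this rport */
}

static void example_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	/* the rport is gone for good; drop LLDD-private references */
}

static struct fc_function_template example_fc_functions = {
	.terminate_rport_io	= example_terminate_rport_io,
	.dev_loss_tmo_callbk	= example_dev_loss_tmo_callbk,
	/* other fields elided */
};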
@ -1963,8 +1969,6 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
}
if (match) {
struct delayed_work *work =
&rport->dev_loss_work;
memcpy(&rport->node_name, &ids->node_name,
sizeof(rport->node_name));
@ -1982,46 +1986,61 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
fci->f->dd_fcrport_size);
/*
* If we were blocked, we were a target.
* If no longer a target, we leave the timer
* running in case the port changes roles
* prior to the timer expiring. If the timer
* fires, the target will be torn down.
* If we were not a target, cancel the
* io terminate and rport timers, and
* we're done.
*
* If we were a target, but our new role
* doesn't indicate a target, leave the
* timers running expecting the role to
* change as the target fully logs in. If
* it doesn't, the target will be torn down.
*
* If we were a target, and our role shows
* we're still a target, cancel the timers
* and kick off a scan.
*/
if (!(ids->roles & FC_RPORT_ROLE_FCP_TARGET))
/* was a target, not in roles */
if ((rport->scsi_target_id != -1) &&
(!(ids->roles & FC_RPORT_ROLE_FCP_TARGET)))
return rport;
/* restart the target */
/*
* Stop the target timers first. Take no action
* on the del_timer failure as the state
* machine state change will validate the
* transaction.
* Stop the fail io and dev_loss timers.
* If they flush, the port_state will
* be checked and will NOOP the function.
*/
if (!cancel_delayed_work(&rport->fail_io_work))
fc_flush_devloss(shost);
if (!cancel_delayed_work(work))
if (!cancel_delayed_work(&rport->dev_loss_work))
fc_flush_devloss(shost);
spin_lock_irqsave(shost->host_lock, flags);
rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
/* initiate a scan of the target */
rport->flags |= FC_RPORT_SCAN_PENDING;
scsi_queue_work(shost, &rport->scan_work);
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_target_unblock(&rport->dev);
/* if target, initiate a scan */
if (rport->scsi_target_id != -1) {
rport->flags |= FC_RPORT_SCAN_PENDING;
scsi_queue_work(shost,
&rport->scan_work);
spin_unlock_irqrestore(shost->host_lock,
flags);
scsi_target_unblock(&rport->dev);
} else
spin_unlock_irqrestore(shost->host_lock,
flags);
return rport;
}
}
}
/* Search the bindings array */
/*
* Search the bindings array
* Note: if never a FCP target, you won't be on this list
*/
if (fc_host->tgtid_bind_type != FC_TGTID_BIND_NONE) {
/* search for a matching consistent binding */
@ -2158,15 +2177,24 @@ fc_remote_port_delete(struct fc_rport *rport)
spin_lock_irqsave(shost->host_lock, flags);
/* If no scsi target id mapping, delete it */
if (rport->scsi_target_id == -1) {
list_del(&rport->peers);
rport->port_state = FC_PORTSTATE_DELETED;
fc_queue_work(shost, &rport->rport_delete_work);
if (rport->port_state != FC_PORTSTATE_ONLINE) {
spin_unlock_irqrestore(shost->host_lock, flags);
return;
}
/*
* In the past, if this was not an FCP-Target, we would
* unconditionally just jump to deleting the rport.
* However, rports can be used as node containers by the LLDD,
* and it's not appropriate to just terminate the rport at the
* first sign of a loss in connectivity. The LLDD may want to
* send ELS traffic to re-validate the login. If the rport is
* immediately deleted, it makes it inappropriate for a node
* container.
* So... we now unconditionally wait dev_loss_tmo before
* destroying an rport.
*/
rport->port_state = FC_PORTSTATE_BLOCKED;
rport->flags |= FC_RPORT_DEVLOSS_PENDING;
@ -2263,11 +2291,11 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
EXPORT_SYMBOL(fc_remote_port_rolechg);
/**
* fc_timeout_deleted_rport - Timeout handler for a deleted remote port that
* was a SCSI target (thus was blocked), and failed
* to return in the alloted time.
* fc_timeout_deleted_rport - Timeout handler for a deleted remote port,
* which we blocked, and has now failed to return
* in the allotted time.
*
* @work: rport target that failed to reappear in the alloted time.
* @work: rport target that failed to reappear in the allotted time.
**/
static void
fc_timeout_deleted_rport(struct work_struct *work)
@ -2283,10 +2311,12 @@ fc_timeout_deleted_rport(struct work_struct *work)
rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
/*
* If the port is ONLINE, then it came back. Validate it's still an
* FCP target. If not, tear down the scsi_target on it.
* If the port is ONLINE, then it came back. If it was a SCSI
* target, validate it still is. If not, tear down the
* scsi_target on it.
*/
if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
(rport->scsi_target_id != -1) &&
!(rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
dev_printk(KERN_ERR, &rport->dev,
"blocked FC remote port time out: no longer"
@ -2297,18 +2327,24 @@ fc_timeout_deleted_rport(struct work_struct *work)
return;
}
/* NOOP state - we're flushing workq's */
if (rport->port_state != FC_PORTSTATE_BLOCKED) {
spin_unlock_irqrestore(shost->host_lock, flags);
dev_printk(KERN_ERR, &rport->dev,
"blocked FC remote port time out: leaving target alone\n");
"blocked FC remote port time out: leaving"
" rport%s alone\n",
(rport->scsi_target_id != -1) ? " and starget" : "");
return;
}
if (fc_host->tgtid_bind_type == FC_TGTID_BIND_NONE) {
if ((fc_host->tgtid_bind_type == FC_TGTID_BIND_NONE) ||
(rport->scsi_target_id == -1)) {
list_del(&rport->peers);
rport->port_state = FC_PORTSTATE_DELETED;
dev_printk(KERN_ERR, &rport->dev,
"blocked FC remote port time out: removing target\n");
"blocked FC remote port time out: removing"
" rport%s\n",
(rport->scsi_target_id != -1) ? " and starget" : "");
fc_queue_work(shost, &rport->rport_delete_work);
spin_unlock_irqrestore(shost->host_lock, flags);
return;

View File

@ -351,6 +351,27 @@ static u8 dc390_clock_speed[] = {100,80,67,57,50, 40, 31, 20};
* (DCBs, SRBs, Queueing)
*
**********************************************************************/
static inline void dc390_start_segment(struct dc390_srb* pSRB)
{
struct scatterlist *psgl = pSRB->pSegmentList;
/* start new sg segment */
pSRB->SGBusAddr = sg_dma_address(psgl);
pSRB->SGToBeXferLen = sg_dma_len(psgl);
}
static inline unsigned long dc390_advance_segment(struct dc390_srb* pSRB, u32 residue)
{
unsigned long xfer = pSRB->SGToBeXferLen - residue;
/* bytes actually transferred in this segment */
pSRB->SGBusAddr += xfer;
pSRB->TotalXferredLen += xfer;
pSRB->SGToBeXferLen = residue;
return xfer;
}
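A hedged usage sketch (an assumed caller, not from the patch) showing how
the two helpers above cooperate when the chip reports a residue and the
current segment is exhausted:

static void example_next_segment(struct dc390_srb *pSRB, u32 residue)
{
	/* credit what was moved; residue stays in the current segment */
	dc390_advance_segment(pSRB, residue);

	if (!pSRB->SGToBeXferLen && ++pSRB->SGIndex < pSRB->SGcount) {
		pSRB->pSegmentList++;
		dc390_start_segment(pSRB);	/* load the next segment */
	}
}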
static struct dc390_dcb __inline__ *dc390_findDCB ( struct dc390_acb* pACB, u8 id, u8 lun)
{
struct dc390_dcb* pDCB = pACB->pLinkDCB;

if (!pDCB)
	return NULL;
@ -625,70 +646,6 @@ dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_sr
return 0;
}
//#define DMA_INT EN_DMA_INT /*| EN_PAGE_INT*/
#define DMA_INT 0
#if DMA_INT
/* This is similar to AM53C974.c ... */
static u8
dc390_dma_intr (struct dc390_acb* pACB)
{
struct dc390_srb* pSRB;
u8 dstate;
DEBUG0(u16 pstate; struct pci_dev *pdev = pACB->pdev);
DEBUG0(pci_read_config_word(pdev, PCI_STATUS, &pstate));
DEBUG0(if (pstate & (PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY))\
{ printk(KERN_WARNING "DC390: PCI state = %04x!\n", pstate); \
pci_write_config_word(pdev, PCI_STATUS, (PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY));});
dstate = DC390_read8 (DMA_Status);
if (! pACB->pActiveDCB || ! pACB->pActiveDCB->pActiveSRB) return dstate;
else pSRB = pACB->pActiveDCB->pActiveSRB;
if (dstate & (DMA_XFER_ABORT | DMA_XFER_ERROR | POWER_DOWN | PCI_MS_ABORT))
{
printk (KERN_ERR "DC390: DMA error (%02x)!\n", dstate);
return dstate;
}
if (dstate & DMA_XFER_DONE)
{
u32 residual, xferCnt; int ctr = 6000000;
if (! (DC390_read8 (DMA_Cmd) & READ_DIRECTION))
{
do
{
DEBUG1(printk (KERN_DEBUG "DC390: read residual bytes ... \n"));
dstate = DC390_read8 (DMA_Status);
residual = DC390_read8 (CtcReg_Low) | DC390_read8 (CtcReg_Mid) << 8 |
DC390_read8 (CtcReg_High) << 16;
residual += DC390_read8 (Current_Fifo) & 0x1f;
} while (residual && ! (dstate & SCSI_INTERRUPT) && --ctr);
if (!ctr) printk (KERN_CRIT "DC390: dma_intr: DMA aborted unfinished: %06x bytes remain!!\n", DC390_read32 (DMA_Wk_ByteCntr));
/* residual = ... */
}
else
residual = 0;
/* ??? */
xferCnt = pSRB->SGToBeXferLen - residual;
pSRB->SGBusAddr += xferCnt;
pSRB->TotalXferredLen += xferCnt;
pSRB->SGToBeXferLen = residual;
# ifdef DC390_DEBUG0
printk (KERN_INFO "DC390: DMA: residual = %i, xfer = %i\n",
(unsigned int)residual, (unsigned int)xferCnt);
# endif
DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
}
dc390_laststatus &= ~0xff000000; dc390_laststatus |= dstate << 24;
return dstate;
}
#endif
static void __inline__
dc390_InvalidCmd(struct dc390_acb* pACB)
@ -708,9 +665,6 @@ DC390_Interrupt(void *dev_id)
u8 phase;
void (*stateV)( struct dc390_acb*, struct dc390_srb*, u8 *);
u8 istate, istatus;
#if DMA_INT
u8 dstatus;
#endif
sstatus = DC390_read8 (Scsi_Status);
if( !(sstatus & INTERRUPT) )
@ -718,22 +672,9 @@ DC390_Interrupt(void *dev_id)
DEBUG1(printk (KERN_DEBUG "sstatus=%02x,", sstatus));
#if DMA_INT
spin_lock_irq(pACB->pScsiHost->host_lock);
dstatus = dc390_dma_intr (pACB);
spin_unlock_irq(pACB->pScsiHost->host_lock);
DEBUG1(printk (KERN_DEBUG "dstatus=%02x,", dstatus));
if (! (dstatus & SCSI_INTERRUPT))
{
DEBUG0(printk (KERN_WARNING "DC390 Int w/o SCSI actions (only DMA?)\n"));
return IRQ_NONE;
}
#else
//DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT);
//dstatus = DC390_read8 (DMA_Status);
//DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT);
#endif
spin_lock_irq(pACB->pScsiHost->host_lock);
@ -821,11 +762,10 @@ static irqreturn_t do_DC390_Interrupt(int irq, void *dev_id)
}
static void
dc390_DataOut_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
dc390_DataOut_0(struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
u8 sstatus;
struct scatterlist *psgl;
u32 ResidCnt, xferCnt;
u32 ResidCnt;
u8 dstate = 0;
sstatus = *psstatus;
@ -856,42 +796,35 @@ dc390_DataOut_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
if( pSRB->SGIndex < pSRB->SGcount )
{
pSRB->pSegmentList++;
psgl = pSRB->pSegmentList;
pSRB->SGBusAddr = cpu_to_le32(pci_dma_lo32(sg_dma_address(psgl)));
pSRB->SGToBeXferLen = cpu_to_le32(sg_dma_len(psgl));
dc390_start_segment(pSRB);
}
else
pSRB->SGToBeXferLen = 0;
}
else
{
ResidCnt = (u32) DC390_read8 (Current_Fifo) & 0x1f;
ResidCnt |= (u32) DC390_read8 (CtcReg_High) << 16;
ResidCnt |= (u32) DC390_read8 (CtcReg_Mid) << 8;
ResidCnt += (u32) DC390_read8 (CtcReg_Low);
ResidCnt = ((u32) DC390_read8 (Current_Fifo) & 0x1f) +
(((u32) DC390_read8 (CtcReg_High) << 16) |
((u32) DC390_read8 (CtcReg_Mid) << 8) |
(u32) DC390_read8 (CtcReg_Low));
xferCnt = pSRB->SGToBeXferLen - ResidCnt;
pSRB->SGBusAddr += xferCnt;
pSRB->TotalXferredLen += xferCnt;
pSRB->SGToBeXferLen = ResidCnt;
dc390_advance_segment(pSRB, ResidCnt);
}
}
if ((*psstatus & 7) != SCSI_DATA_OUT)
{
DC390_write8 (DMA_Cmd, WRITE_DIRECTION+DMA_IDLE_CMD); /* | DMA_INT */
DC390_write8 (DMA_Cmd, WRITE_DIRECTION+DMA_IDLE_CMD);
DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
}
}
static void
dc390_DataIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
dc390_DataIn_0(struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
u8 sstatus, residual, bval;
struct scatterlist *psgl;
u32 ResidCnt, i;
u32 ResidCnt, i;
unsigned long xferCnt;
u8 *ptr;
sstatus = *psstatus;
@ -922,19 +855,17 @@ dc390_DataIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
DEBUG1(ResidCnt = ((unsigned long) DC390_read8 (CtcReg_High) << 16) \
+ ((unsigned long) DC390_read8 (CtcReg_Mid) << 8) \
+ ((unsigned long) DC390_read8 (CtcReg_Low)));
DEBUG1(printk (KERN_DEBUG "Count_2_Zero (ResidCnt=%i,ToBeXfer=%li),", ResidCnt, pSRB->SGToBeXferLen));
DEBUG1(printk (KERN_DEBUG "Count_2_Zero (ResidCnt=%u,ToBeXfer=%lu),", ResidCnt, pSRB->SGToBeXferLen));
DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD); /* | DMA_INT */
DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD);
pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
pSRB->SGIndex++;
if( pSRB->SGIndex < pSRB->SGcount )
{
pSRB->pSegmentList++;
psgl = pSRB->pSegmentList;
pSRB->SGBusAddr = cpu_to_le32(pci_dma_lo32(sg_dma_address(psgl)));
pSRB->SGToBeXferLen = cpu_to_le32(sg_dma_len(psgl));
dc390_start_segment(pSRB);
}
else
pSRB->SGToBeXferLen = 0;
@ -973,47 +904,45 @@ din_1:
}
/* It seems a DMA Blast abort isn't that bad ... */
if (!i) printk (KERN_ERR "DC390: DMA Blast aborted unfinished!\n");
//DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD); /* | DMA_INT */
dc390_laststatus &= ~0xff000000; dc390_laststatus |= bval << 24;
//DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD);
dc390_laststatus &= ~0xff000000;
dc390_laststatus |= bval << 24;
DEBUG1(printk (KERN_DEBUG "Blast: Read %i times DMA_Status %02x", 0xa000-i, bval));
ResidCnt = (u32) DC390_read8 (CtcReg_High);
ResidCnt <<= 8;
ResidCnt |= (u32) DC390_read8 (CtcReg_Mid);
ResidCnt <<= 8;
ResidCnt |= (u32) DC390_read8 (CtcReg_Low);
ResidCnt = (((u32) DC390_read8 (CtcReg_High) << 16) |
((u32) DC390_read8 (CtcReg_Mid) << 8)) |
(u32) DC390_read8 (CtcReg_Low);
xferCnt = pSRB->SGToBeXferLen - ResidCnt;
pSRB->SGBusAddr += xferCnt;
pSRB->TotalXferredLen += xferCnt;
pSRB->SGToBeXferLen = ResidCnt;
xferCnt = dc390_advance_segment(pSRB, ResidCnt);
if( residual )
{
	static int feedback_requested;

	bval = DC390_read8 (ScsiFifo); /* get one residual byte */

	if (!feedback_requested) {
		feedback_requested = 1;
		printk(KERN_WARNING "%s: Please, contact <linux-scsi@vger.kernel.org> "
		       "to help improve support for your system.\n", __FILE__);
	}

	ptr = (u8 *) bus_to_virt( pSRB->SGBusAddr );
	*ptr = bval;
	pSRB->SGBusAddr++; xferCnt++;
	pSRB->TotalXferredLen++;
	pSRB->SGToBeXferLen--;
}
if (residual) {
	size_t count = 1;
	size_t offset = pSRB->SGBusAddr - sg_dma_address(pSRB->pSegmentList);
	unsigned long flags;
	u8 *ptr;

	bval = DC390_read8 (ScsiFifo); /* get one residual byte */

	local_irq_save(flags);
	ptr = scsi_kmap_atomic_sg(pSRB->pSegmentList, pSRB->SGcount, &offset, &count);
	if (likely(ptr)) {
		*(ptr + offset) = bval;
		scsi_kunmap_atomic_sg(ptr);
	}
	local_irq_restore(flags);
	WARN_ON(!ptr);

	/* 1 more byte read */
	xferCnt += dc390_advance_segment(pSRB, pSRB->SGToBeXferLen - 1);
}
DEBUG1(printk (KERN_DEBUG "Xfered: %li, Total: %li, Remaining: %li\n", xferCnt,\
DEBUG1(printk (KERN_DEBUG "Xfered: %lu, Total: %lu, Remaining: %lu\n", xferCnt,\
pSRB->TotalXferredLen, pSRB->SGToBeXferLen));
}
}
if ((*psstatus & 7) != SCSI_DATA_IN)
{
DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD); /* | DMA_INT */
DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD);
}
}
@ -1216,7 +1145,7 @@ dc390_MsgIn_set_sync (struct dc390_acb* pACB, struct dc390_srb* pSRB)
/* handle RESTORE_PTR */
/* I presume, this command is already mapped, so, have to remap. */
/* This doesn't look very healthy... to-be-fixed */
static void
dc390_restore_ptr (struct dc390_acb* pACB, struct dc390_srb* pSRB)
{
@ -1225,6 +1154,7 @@ dc390_restore_ptr (struct dc390_acb* pACB, struct dc390_srb* pSRB)
pSRB->TotalXferredLen = 0;
pSRB->SGIndex = 0;
if (pcmd->use_sg) {
size_t saved;
pSRB->pSegmentList = (struct scatterlist *)pcmd->request_buffer;
psgl = pSRB->pSegmentList;
//dc390_pci_sync(pSRB);
@ -1236,15 +1166,16 @@ dc390_restore_ptr (struct dc390_acb* pACB, struct dc390_srb* pSRB)
if( pSRB->SGIndex < pSRB->SGcount )
{
pSRB->pSegmentList++;
psgl = pSRB->pSegmentList;
pSRB->SGBusAddr = cpu_to_le32(pci_dma_lo32(sg_dma_address(psgl)));
pSRB->SGToBeXferLen = cpu_to_le32(sg_dma_len(psgl));
dc390_start_segment(pSRB);
}
else
pSRB->SGToBeXferLen = 0;
}
pSRB->SGToBeXferLen -= (pSRB->Saved_Ptr - pSRB->TotalXferredLen);
pSRB->SGBusAddr += (pSRB->Saved_Ptr - pSRB->TotalXferredLen);
saved = pSRB->Saved_Ptr - pSRB->TotalXferredLen;
pSRB->SGToBeXferLen -= saved;
pSRB->SGBusAddr += saved;
printk (KERN_INFO "DC390: Pointer restored. Segment %i, Total %li, Bus %08lx\n",
pSRB->SGIndex, pSRB->Saved_Ptr, pSRB->SGBusAddr);
@ -1365,7 +1296,6 @@ dc390_MsgIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
static void
dc390_DataIO_Comm( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 ioDir)
{
struct scatterlist *psgl;
unsigned long lval;
struct dc390_dcb* pDCB = pACB->pActiveDCB;
@ -1391,12 +1321,11 @@ dc390_DataIO_Comm( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 ioDir)
if( pSRB->SGIndex < pSRB->SGcount )
{
DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir /* | DMA_INT */);
DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir);
if( !pSRB->SGToBeXferLen )
{
psgl = pSRB->pSegmentList;
pSRB->SGBusAddr = cpu_to_le32(pci_dma_lo32(sg_dma_address(psgl)));
pSRB->SGToBeXferLen = cpu_to_le32(sg_dma_len(psgl));
dc390_start_segment(pSRB);
DEBUG1(printk (KERN_DEBUG " DC390: Next SG segment."));
}
lval = pSRB->SGToBeXferLen;
@ -1410,12 +1339,12 @@ dc390_DataIO_Comm( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 ioDir)
DC390_write32 (DMA_XferCnt, pSRB->SGToBeXferLen);
DC390_write32 (DMA_XferAddr, pSRB->SGBusAddr);
//DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir); /* | DMA_INT; */
//DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir);
pSRB->SRBState = SRB_DATA_XFER;
DC390_write8 (ScsiCmd, DMA_COMMAND+INFO_XFER_CMD);
DC390_write8 (DMA_Cmd, DMA_START_CMD | ioDir | DMA_INT);
DC390_write8 (DMA_Cmd, DMA_START_CMD | ioDir);
//DEBUG1(DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT));
//DEBUG1(printk (KERN_DEBUG "DC390: DMA_Status: %02x\n", DC390_read8 (DMA_Status)));
//DEBUG1(DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT));
@ -1436,8 +1365,8 @@ dc390_DataIO_Comm( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 ioDir)
pSRB->SRBState |= SRB_XFERPAD;
DC390_write8 (ScsiCmd, DMA_COMMAND+XFER_PAD_BYTE);
/*
DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir); // | DMA_INT;
DC390_write8 (DMA_Cmd, DMA_START_CMD | ioDir | DMA_INT);
DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir);
DC390_write8 (DMA_Cmd, DMA_START_CMD | ioDir);
*/
}
}

View File

@ -19,14 +19,6 @@
#define SEL_TIMEOUT 153 /* 250 ms selection timeout (@ 40 MHz) */
#define pci_dma_lo32(a) (a & 0xffffffff)
typedef u8 UCHAR; /* 8 bits */
typedef u16 USHORT; /* 16 bits */
typedef u32 UINT; /* 32 bits */
typedef unsigned long ULONG; /* 32/64 bits */
/*
;-----------------------------------------------------------------------
; SCSI Request Block
@ -43,7 +35,9 @@ struct scatterlist *pSegmentList;
struct scatterlist Segmentx; /* make a one entry of S/G list table */
unsigned long SGBusAddr; /*;a segment starting address as seen by AM53C974A*/
unsigned long SGBusAddr; /*;a segment starting address as seen by AM53C974A
in CPU endianness. We're only getting 32-bit bus
addresses by default */
unsigned long SGToBeXferLen; /*; to be xfer length */
unsigned long TotalXferredLen;
unsigned long SavedTotXLen;

View File

@ -314,8 +314,7 @@ struct scsi_core {
struct list_head task_queue;
int task_queue_size;
struct semaphore queue_thread_sema;
int queue_thread_kill;
struct task_struct *queue_thread;
};
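A hedged sketch of the kthread pattern this field change enables: keeping
the task_struct lets the core pair kthread_run() with kthread_stop()
instead of a hand-rolled kill flag. Names below are illustrative; teardown
raises the semaphore first so the worker can observe the stop request.

#include <linux/kthread.h>

static int example_queue_thread(void *data)
{
	struct scsi_core *core = data;

	while (1) {
		down_interruptible(&core->queue_thread_sema);
		/* drain core->task_queue here */
		if (kthread_should_stop())
			break;
	}
	return 0;
}

static int example_start_queue_thread(struct scsi_core *core)
{
	core->queue_thread = kthread_run(example_queue_thread, core,
					 "example_sas_q");
	if (IS_ERR(core->queue_thread))
		return PTR_ERR(core->queue_thread);
	return 0;
}

/* teardown (illustrative): wake the worker, then reap it */
static void example_stop_queue_thread(struct scsi_core *core)
{
	up(&core->queue_thread_sema);
	kthread_stop(core->queue_thread);
}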
struct sas_ha_event {