Mirror of https://github.com/torvalds/linux.git
Merge branch '5.1/scsi-fixes' into 5.2/merge
We have a few submissions for 5.2 that depend on fixes merged post 5.1-rc1. Merge the fixes branch into the queue.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit 17631462cd
@@ -624,6 +624,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
 	add_timer(&erp_action->timer);
 }
 
+void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+				     int clear, char *dbftag)
+{
+	unsigned long flags;
+	struct zfcp_port *port;
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	read_lock(&adapter->port_list_lock);
+	list_for_each_entry(port, &adapter->port_list, list)
+		_zfcp_erp_port_forced_reopen(port, clear, dbftag);
+	read_unlock(&adapter->port_list_lock);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
 static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
 				      int clear, char *dbftag)
 {
@@ -1341,6 +1355,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
 		struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
 		int lun_status;
 
+		if (sdev->sdev_state == SDEV_DEL ||
+		    sdev->sdev_state == SDEV_CANCEL)
+			continue;
 		if (zsdev->port != port)
 			continue;
 		/* LUN under port of interest */
@@ -70,6 +70,8 @@ extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
 				 char *dbftag);
 extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
 extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+					    int clear, char *dbftag);
 extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
 extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
 extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
@@ -239,10 +239,6 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
 	list_for_each_entry(port, &adapter->port_list, list) {
 		if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
 			zfcp_fc_test_link(port);
-		if (!port->d_id)
-			zfcp_erp_port_reopen(port,
-					     ZFCP_STATUS_COMMON_ERP_FAILED,
-					     "fcrscn1");
 	}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 }
@@ -250,6 +246,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
 static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
 {
 	struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
+	struct zfcp_adapter *adapter = fsf_req->adapter;
 	struct fc_els_rscn *head;
 	struct fc_els_rscn_page *page;
 	u16 i;
@@ -263,6 +260,22 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
 	no_entries = be16_to_cpu(head->rscn_plen) /
 		sizeof(struct fc_els_rscn_page);
 
+	if (no_entries > 1) {
+		/* handle failed ports */
+		unsigned long flags;
+		struct zfcp_port *port;
+
+		read_lock_irqsave(&adapter->port_list_lock, flags);
+		list_for_each_entry(port, &adapter->port_list, list) {
+			if (port->d_id)
+				continue;
+			zfcp_erp_port_reopen(port,
+					     ZFCP_STATUS_COMMON_ERP_FAILED,
+					     "fcrscn1");
+		}
+		read_unlock_irqrestore(&adapter->port_list_lock, flags);
+	}
+
 	for (i = 1; i < no_entries; i++) {
 		/* skip head and start with 1st element */
 		page++;
@@ -368,6 +368,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
 	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
 	int ret = SUCCESS, fc_ret;
 
+	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
+		zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
+		zfcp_erp_wait(adapter);
+	}
 	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
 	zfcp_erp_wait(adapter);
 	fc_ret = fc_block_scsi_eh(scpnt);
@@ -2640,9 +2640,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
 	return capacity;
 }
 
+static inline int aac_pci_offline(struct aac_dev *dev)
+{
+	return pci_channel_offline(dev->pdev) || dev->handle_pci_error;
+}
+
 static inline int aac_adapter_check_health(struct aac_dev *dev)
 {
-	if (unlikely(pci_channel_offline(dev->pdev)))
+	if (unlikely(aac_pci_offline(dev)))
 		return -1;
 
 	return (dev)->a_ops.adapter_check_health(dev);
@@ -672,7 +672,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 					return -ETIMEDOUT;
 				}
 
-				if (unlikely(pci_channel_offline(dev->pdev)))
+				if (unlikely(aac_pci_offline(dev)))
 					return -EFAULT;
 
 				if ((blink = aac_adapter_check_health(dev)) > 0) {
@@ -772,7 +772,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
 
 			spin_unlock_irqrestore(&fibptr->event_lock, flags);
 
-			if (unlikely(pci_channel_offline(dev->pdev)))
+			if (unlikely(aac_pci_offline(dev)))
 				return -EFAULT;
 
 			fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
@@ -1713,8 +1713,11 @@ csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
 	}
 
 out:
-	if (req->nsge > 0)
+	if (req->nsge > 0) {
 		scsi_dma_unmap(cmnd);
+		if (req->dcopy && (host_status == DID_OK))
+			host_status = csio_scsi_copy_to_sgl(hw, req);
+	}
 
 	cmnd->result = (((host_status) << 16) | scsi_status);
 	cmnd->scsi_done(cmnd);
@@ -1801,6 +1801,12 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
 	}
 	hisi_sas_dereg_device(hisi_hba, device);
 
+	if (dev_is_sata(device)) {
+		rc = hisi_sas_softreset_ata_disk(device);
+		if (rc)
+			return TMF_RESP_FUNC_FAILED;
+	}
+
 	rc = hisi_sas_debug_I_T_nexus_reset(device);
 
 	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
@@ -139,6 +139,7 @@ static const struct {
 	{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
 
 	{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
+	{ IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
 };
 
 static void ibmvfc_npiv_login(struct ibmvfc_host *);
@@ -1494,9 +1495,9 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
 	if (rsp->flags & FCP_RSP_LEN_VALID)
 		rsp_code = rsp->data.info.rsp_code;
 
-	scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) "
+	scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
 		    "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
-		    cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error,
+		    cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
 		    rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
 }
 
@@ -2022,7 +2023,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
 		sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
 			    "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
 			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
-			    rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
 			    fc_rsp->scsi_status);
 		rsp_rc = -EIO;
 	} else
@@ -2381,7 +2382,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
 		sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
 			    "flags: %x fcp_rsp: %x, scsi_status: %x\n",
 			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
-			    rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
 			    fc_rsp->scsi_status);
 		rsp_rc = -EIO;
 	} else
@@ -2755,16 +2756,18 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
 		if (crq->format == IBMVFC_PARTITION_MIGRATED) {
 			/* We need to re-setup the interpartition connection */
-			dev_info(vhost->dev, "Re-enabling adapter\n");
+			dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
 			vhost->client_migrated = 1;
 			ibmvfc_purge_requests(vhost, DID_REQUEUE);
 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
-		} else {
-			dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
+		} else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
+			dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
 			ibmvfc_purge_requests(vhost, DID_ERROR);
 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
+		} else {
+			dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
 		}
 		return;
 	case IBMVFC_CRQ_CMD_RSP:
@@ -3348,7 +3351,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
 
 		tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
 			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-			rsp->status, rsp->error, status);
+			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
 		break;
 	}
 
@@ -3446,9 +3449,10 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 
 		tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
-			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), rsp->status, rsp->error,
-			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type,
-			ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status);
+			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
+			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
+			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
+			ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
 		break;
 	}
 
@@ -3619,7 +3623,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
 		fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
 		tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
 			 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
-			 mad->iu.status, mad->iu.error,
+			 be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
 			 ibmvfc_get_fc_type(fc_reason), fc_reason,
 			 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
 		break;
@@ -3831,9 +3835,10 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
 
 		tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
 			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-			rsp->status, rsp->error, ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)),
-			rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)),
-			rsp->fc_explain, status);
+			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
+			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
+			ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
+			status);
 		break;
 	}
 
@@ -3959,7 +3964,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
 		level += ibmvfc_retry_host_init(vhost);
 		ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
 			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-			   rsp->status, rsp->error);
+			   be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
 		break;
 	case IBMVFC_MAD_DRIVER_FAILED:
 		break;
@@ -4024,7 +4029,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
 		ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
 			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-			   rsp->status, rsp->error);
+			   be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
 		ibmvfc_free_event(evt);
 		return;
 	case IBMVFC_MAD_CRQ_ERROR:
@@ -78,9 +78,14 @@ enum ibmvfc_crq_valid {
 	IBMVFC_CRQ_XPORT_EVENT = 0xFF,
 };
 
-enum ibmvfc_crq_format {
+enum ibmvfc_crq_init_msg {
 	IBMVFC_CRQ_INIT = 0x01,
 	IBMVFC_CRQ_INIT_COMPLETE = 0x02,
+};
+
+enum ibmvfc_crq_xport_evts {
+	IBMVFC_PARTNER_FAILED = 0x01,
+	IBMVFC_PARTNER_DEREGISTER = 0x02,
 	IBMVFC_PARTITION_MIGRATED = 0x06,
 };
 
@@ -96,6 +96,7 @@ static int client_reserve = 1;
 static char partition_name[96] = "UNKNOWN";
 static unsigned int partition_number = -1;
 static LIST_HEAD(ibmvscsi_head);
+static DEFINE_SPINLOCK(ibmvscsi_driver_lock);
 
 static struct scsi_transport_template *ibmvscsi_transport_template;
 
@@ -2270,7 +2271,9 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	}
 
 	dev_set_drvdata(&vdev->dev, hostdata);
+	spin_lock(&ibmvscsi_driver_lock);
 	list_add_tail(&hostdata->host_list, &ibmvscsi_head);
+	spin_unlock(&ibmvscsi_driver_lock);
 	return 0;
 
       add_srp_port_failed:
@@ -2292,15 +2295,27 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 static int ibmvscsi_remove(struct vio_dev *vdev)
 {
 	struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
-	list_del(&hostdata->host_list);
-	unmap_persist_bufs(hostdata);
+	unsigned long flags;
+
+	srp_remove_host(hostdata->host);
+	scsi_remove_host(hostdata->host);
+
+	purge_requests(hostdata, DID_ERROR);
+
+	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	release_event_pool(&hostdata->pool, hostdata);
+	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+
 	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
 					max_events);
 
 	kthread_stop(hostdata->work_thread);
-	srp_remove_host(hostdata->host);
-	scsi_remove_host(hostdata->host);
+	unmap_persist_bufs(hostdata);
+
+	spin_lock(&ibmvscsi_driver_lock);
+	list_del(&hostdata->host_list);
+	spin_unlock(&ibmvscsi_driver_lock);
+
 	scsi_host_put(hostdata->host);
 
 	return 0;
@@ -3878,10 +3878,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	 * wake up the thread.
 	 */
 	spin_lock(&lpfc_cmd->buf_lock);
-	if (unlikely(lpfc_cmd->cur_iocbq.iocb_flag & LPFC_DRIVER_ABORTED)) {
-		lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
-		if (lpfc_cmd->waitq)
-			wake_up(lpfc_cmd->waitq);
+	lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
+	if (lpfc_cmd->waitq) {
+		wake_up(lpfc_cmd->waitq);
+		lpfc_cmd->waitq = NULL;
 	}
 	spin_unlock(&lpfc_cmd->buf_lock);
 
@@ -3397,12 +3397,18 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 
 	if (smid < ioc->hi_priority_smid) {
 		struct scsiio_tracker *st;
+		void *request;
 
 		st = _get_st_from_smid(ioc, smid);
 		if (!st) {
 			_base_recovery_check(ioc);
 			return;
 		}
+
+		/* Clear MPI request frame */
+		request = mpt3sas_base_get_msg_frame(ioc, smid);
+		memset(request, 0, ioc->request_sz);
+
 		mpt3sas_base_clear_st(ioc, st);
 		_base_recovery_check(ioc);
 		return;
@@ -1462,11 +1462,23 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 {
 	struct scsi_cmnd *scmd = NULL;
 	struct scsiio_tracker *st;
+	Mpi25SCSIIORequest_t *mpi_request;
 
 	if (smid > 0 &&
 	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
 		u32 unique_tag = smid - 1;
+
+		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+
+		/*
+		 * If SCSI IO request is outstanding at driver level then
+		 * DevHandle filed must be non-zero. If DevHandle is zero
+		 * then it means that this smid is free at driver level,
+		 * so return NULL.
+		 */
+		if (!mpi_request->DevHandle)
+			return scmd;
 
 		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
 		if (scmd) {
 			st = scsi_cmd_priv(scmd);
@@ -1392,10 +1392,8 @@ static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
 
 static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
 {
-	struct qedi_nvm_iscsi_image nvm_image;
-
 	qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev,
-					       sizeof(nvm_image),
+					       sizeof(struct qedi_nvm_iscsi_image),
 					       &qedi->nvm_buf_dma, GFP_KERNEL);
 	if (!qedi->iscsi_image) {
 		QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
@@ -2236,14 +2234,13 @@ static void qedi_boot_release(void *data)
 static int qedi_get_boot_info(struct qedi_ctx *qedi)
 {
 	int ret = 1;
-	struct qedi_nvm_iscsi_image nvm_image;
 
 	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
 		  "Get NVM iSCSI CFG image\n");
 	ret = qedi_ops->common->nvm_get_image(qedi->cdev,
 					      QED_NVM_IMAGE_ISCSI_CFG,
 					      (char *)qedi->iscsi_image,
-					      sizeof(nvm_image));
+					      sizeof(struct qedi_nvm_iscsi_image));
 	if (ret)
 		QEDI_ERR(&qedi->dbg_ctx,
 			 "Could not get NVM image. ret = %d\n", ret);
@@ -5072,6 +5072,13 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
 		if ((domain & 0xf0) == 0xf0)
 			continue;
 
+		/* Bypass if not same domain and area of adapter. */
+		if (area && domain && ((area != vha->d_id.b.area) ||
+		    (domain != vha->d_id.b.domain)) &&
+		    (ha->current_topology == ISP_CFG_NL))
+			continue;
+
+
 		/* Bypass invalid local loop ID. */
 		if (loop_id > LAST_LOCAL_LOOP_ID)
 			continue;
@@ -1474,7 +1474,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
 		goto eh_reset_failed;
 	}
 	err = 2;
-	if (do_reset(fcport, cmd->device->lun, blk_mq_rq_cpu(cmd->request) + 1)
+	if (do_reset(fcport, cmd->device->lun, 1)
 	    != QLA_SUCCESS) {
 		ql_log(ql_log_warn, vha, 0x800c,
 		       "do_reset failed for cmd=%p.\n", cmd);
@@ -3203,6 +3203,8 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
 	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
 		return -EINVAL;
 	ep = iscsi_lookup_endpoint(transport_fd);
+	if (!ep)
+		return -EINVAL;
 	conn = cls_conn->dd_data;
 	qla_conn = conn->dd_data;
 	qla_conn->qla_ep = ep->dd_data;
@@ -238,6 +238,7 @@ static struct {
 	{"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
 	{"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
 	{"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+	{"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
 	{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
 	{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
 	{"SONY", "TSL", NULL, BLIST_FORCELUN},	/* DDS3 & DDS4 autoloaders */
@@ -75,6 +75,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
 	{"NETAPP", "INF-01-00", "rdac", },
 	{"LSI", "INF-01-00", "rdac", },
 	{"ENGENIO", "INF-01-00", "rdac", },
+	{"LENOVO", "DE_Series", "rdac", },
 	{NULL, NULL, NULL },
 };
 
@@ -585,9 +585,16 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
 	if (!blk_rq_is_scsi(req)) {
 		WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
 		cmd->flags &= ~SCMD_INITIALIZED;
-		destroy_rcu_head(&cmd->rcu);
 	}
 
+	/*
+	 * Calling rcu_barrier() is not necessary here because the
+	 * SCSI error handler guarantees that the function called by
+	 * call_rcu() has been called before scsi_end_request() is
+	 * called.
+	 */
+	destroy_rcu_head(&cmd->rcu);
+
 	/*
 	 * In the MQ case the command gets freed by __blk_mq_end_request,
 	 * so we have to do all cleanup that depends on it earlier.
@@ -2541,8 +2548,10 @@ void scsi_device_resume(struct scsi_device *sdev)
 	 * device deleted during suspend)
 	 */
 	mutex_lock(&sdev->state_mutex);
-	sdev->quiesced_by = NULL;
-	blk_clear_pm_only(sdev->request_queue);
+	if (sdev->quiesced_by) {
+		sdev->quiesced_by = NULL;
+		blk_clear_pm_only(sdev->request_queue);
+	}
 	if (sdev->sdev_state == SDEV_QUIESCE)
 		scsi_device_set_state(sdev, SDEV_RUNNING);
 	mutex_unlock(&sdev->state_mutex);
@@ -771,6 +771,12 @@ store_state_field(struct device *dev, struct device_attribute *attr,
 
 	mutex_lock(&sdev->state_mutex);
 	ret = scsi_device_set_state(sdev, state);
+	/*
+	 * If the device state changes to SDEV_RUNNING, we need to run
+	 * the queue to avoid I/O hang.
+	 */
+	if (ret == 0 && state == SDEV_RUNNING)
+		blk_mq_run_hw_queues(sdev->request_queue, true);
 	mutex_unlock(&sdev->state_mutex);
 
 	return ret == 0 ? count : -EINVAL;
@@ -2200,6 +2200,8 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
 	scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
 	/* flush running scans then delete devices */
 	flush_work(&session->scan_work);
+	/* flush running unbind operations */
+	flush_work(&session->unbind_work);
 	__iscsi_unbind_session(&session->unbind_work);
 
 	/* hw iscsi may not have removed all connections from session */
@@ -1416,11 +1416,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
 			scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
 	}
 
-	/*
-	 * XXX and what if there are packets in flight and this close()
-	 * XXX is followed by a "rmmod sd_mod"?
-	 */
-
 	scsi_disk_put(sdkp);
 }
 
@@ -3077,6 +3072,9 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
 	unsigned int opt_xfer_bytes =
 		logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
 
+	if (sdkp->opt_xfer_blocks == 0)
+		return false;
+
 	if (sdkp->opt_xfer_blocks > dev_max) {
 		sd_first_printk(KERN_WARNING, sdkp,
 				"Optimal transfer size %u logical blocks " \
@@ -3483,9 +3481,21 @@ static void scsi_disk_release(struct device *dev)
 {
 	struct scsi_disk *sdkp = to_scsi_disk(dev);
 	struct gendisk *disk = sdkp->disk;
+	struct request_queue *q = disk->queue;
 
 	ida_free(&sd_index_ida, sdkp->index);
 
+	/*
+	 * Wait until all requests that are in progress have completed.
+	 * This is necessary to avoid that e.g. scsi_end_request() crashes
+	 * due to clearing the disk->private_data pointer. Wait from inside
+	 * scsi_disk_release() instead of from sd_release() to avoid that
+	 * freezing and unfreezing the request queue affects user space I/O
+	 * in case multiple processes open a /dev/sd... node concurrently.
+	 */
+	blk_mq_freeze_queue(q);
+	blk_mq_unfreeze_queue(q);
+
 	disk->private_data = NULL;
 	put_disk(disk);
 	put_device(&sdkp->device->sdev_gendev);
@@ -385,7 +385,7 @@ enum storvsc_request_type {
  * This is the end of Protocol specific defines.
  */
 
-static int storvsc_ringbuffer_size = (256 * PAGE_SIZE);
+static int storvsc_ringbuffer_size = (128 * 1024);
 static u32 max_outstanding_req_per_channel;
 
 static int storvsc_vcpus_per_sub_channel = 4;
@@ -668,13 +668,22 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
 {
 	struct device *dev = &device->device;
 	struct storvsc_device *stor_device;
-	int num_cpus = num_online_cpus();
 	int num_sc;
 	struct storvsc_cmd_request *request;
 	struct vstor_packet *vstor_packet;
 	int ret, t;
 
-	num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns);
+	/*
+	 * If the number of CPUs is artificially restricted, such as
+	 * with maxcpus=1 on the kernel boot line, Hyper-V could offer
+	 * sub-channels >= the number of CPUs. These sub-channels
+	 * should not be created. The primary channel is already created
+	 * and assigned to one CPU, so check against # CPUs - 1.
+	 */
+	num_sc = min((int)(num_online_cpus() - 1), max_chns);
+	if (!num_sc)
+		return;
+
 	stor_device = get_out_stor_device(device);
 	if (!stor_device)
 		return;