SCSI fixes on 20190125
Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:

 "Six fixes, all of which appear to have user-visible consequences.
  The DMA one is a regression fix from the merge window; of the others,
  four are driver specific and one is specific to the target code"

Signed-off-by: James E.J. Bottomley <jejb@linux.ibm.com>

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: ufs: Use explicit access size in ufshcd_dump_regs
  scsi: tcmu: fix use after free
  scsi: csiostor: fix NULL pointer dereference in csio_vport_set_state()
  scsi: lpfc: nvmet: avoid hang / use-after-free when destroying targetport
  scsi: lpfc: nvme: avoid hang / use-after-free when destroying localport
  scsi: communicate max segment size to the DMA mapping code
commit 7930851ef1
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
@@ -915,6 +915,10 @@ static struct scsi_host_template pata_macio_sht = {
 	.sg_tablesize		= MAX_DCMDS,
 	/* We may not need that strict one */
 	.dma_boundary		= ATA_DMA_BOUNDARY,
+	/* Not sure what the real max is but we know it's less than 64K, let's
+	 * use 64K minus 256
+	 */
+	.max_segment_size	= MAX_DBDMA_SEG,
 	.slave_configure	= pata_macio_slave_config,
 };
@@ -1044,11 +1048,6 @@ static int pata_macio_common_init(struct pata_macio_priv *priv,
 	/* Make sure we have sane initial timings in the cache */
 	pata_macio_default_timings(priv);
 
-	/* Not sure what the real max is but we know it's less than 64K, let's
-	 * use 64K minus 256
-	 */
-	dma_set_max_seg_size(priv->dev, MAX_DBDMA_SEG);
-
 	/* Allocate libata host for 1 port */
 	memset(&pinfo, 0, sizeof(struct ata_port_info));
 	pmac_macio_calc_timing_masks(priv, &pinfo);
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
@@ -245,8 +245,15 @@ struct inic_port_priv {
 
 static struct scsi_host_template inic_sht = {
 	ATA_BASE_SHT(DRV_NAME),
 	.sg_tablesize		= LIBATA_MAX_PRD, /* maybe it can be larger? */
-	.dma_boundary		= INIC_DMA_BOUNDARY,
+
+	/*
+	 * This controller is braindamaged. dma_boundary is 0xffff like others
+	 * but it will lock up the whole machine HARD if 65536 byte PRD entry
+	 * is fed. Reduce maximum segment size.
+	 */
+	.dma_boundary		= INIC_DMA_BOUNDARY,
+	.max_segment_size	= 65536 - 512,
 };
 
 static const int scr_map[] = {
@@ -868,17 +875,6 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return rc;
 	}
 
-	/*
-	 * This controller is braindamaged. dma_boundary is 0xffff
-	 * like others but it will lock up the whole machine HARD if
-	 * 65536 byte PRD entry is fed. Reduce maximum segment size.
-	 */
-	rc = dma_set_max_seg_size(&pdev->dev, 65536 - 512);
-	if (rc) {
-		dev_err(&pdev->dev, "failed to set the maximum segment size\n");
-		return rc;
-	}
-
 	rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
 	if (rc) {
 		dev_err(&pdev->dev, "failed to initialize controller\n");
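The in-diff comment above hides a subtlety worth spelling out: .dma_boundary only forbids a segment from crossing a (boundary+1)-aligned region, it does not cap segment length. A 64K-aligned segment of exactly 65536 bytes satisfies a 0xffff boundary and still yields the fatal PRD entry, which is why the separate .max_segment_size cap is needed. A minimal userspace sketch of the boundary arithmetic (the helper name and sample values are illustrative, not kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same check the block layer performs: a segment [addr, addr+len-1]
 * must stay within one (mask+1)-sized region. */
static bool crosses_boundary(uint64_t addr, uint64_t len, uint64_t mask)
{
	return (addr | mask) != ((addr + len - 1) | mask);
}

int main(void)
{
	/* 64K-aligned, 65536-byte segment: passes a 0xffff dma_boundary... */
	printf("crosses: %d\n", crosses_boundary(0x10000, 65536, 0xffff));
	/* ...so only max_segment_size = 65536 - 512 keeps the PRD short. */
	return 0;
}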
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
@@ -1144,10 +1144,6 @@ static int sbp2_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
 	if (device->is_local)
 		return -ENODEV;
 
-	if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE)
-		WARN_ON(dma_set_max_seg_size(device->card->device,
-					     SBP2_MAX_SEG_SIZE));
-
 	shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
 	if (shost == NULL)
 		return -ENOMEM;
@@ -1610,6 +1606,7 @@ static struct scsi_host_template scsi_driver_template = {
 	.eh_abort_handler	= sbp2_scsi_abort,
 	.this_id		= -1,
 	.sg_tablesize		= SG_ALL,
+	.max_segment_size	= SBP2_MAX_SEG_SIZE,
 	.can_queue		= 1,
 	.sdev_attrs		= sbp2_scsi_sysfs_attrs,
 };
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
@@ -1747,11 +1747,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		shost->max_sectors = (shost->sg_tablesize * 8) + 112;
 	}
 
-	error = dma_set_max_seg_size(&pdev->dev,
-		(aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
-			(shost->max_sectors << 9) : 65536);
-	if (error)
-		goto out_deinit;
+	if (aac->adapter_info.options & AAC_OPT_NEW_COMM)
+		shost->max_segment_size = shost->max_sectors << 9;
+	else
+		shost->max_segment_size = 65536;
 
 	/*
 	 * Firmware printf works only with older firmware.
diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c
@@ -594,12 +594,12 @@ csio_vport_create(struct fc_vport *fc_vport, bool disable)
 	}
 
 	fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
+	ln->fc_vport = fc_vport;
 
 	if (csio_fcoe_alloc_vnp(hw, ln))
 		goto error;
 
 	*(struct csio_lnode **)fc_vport->dd_data = ln;
-	ln->fc_vport = fc_vport;
 	if (!fc_vport->node_name)
 		fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln));
 	if (!fc_vport->port_name)
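The csiostor fix is purely an ordering change: the csio_fcoe_alloc_vnp() path can reach csio_vport_set_state(), which dereferences ln->fc_vport, so the back-pointer must be published before that call rather than after. A generic sketch of the rule, with stand-in names that are illustrative rather than csiostor API:

struct vport { int state; };

struct lnode {
	struct vport *fc_vport;	/* back-pointer used down the call chain */
};

/* Stand-ins for csio_vport_set_state() and csio_fcoe_alloc_vnp(). */
static void set_state(struct vport *vp) { vp->state = 1; }
static int alloc_vnp(struct lnode *ln) { set_state(ln->fc_vport); return 0; }

static int attach(struct lnode *ln, struct vport *vp)
{
	ln->fc_vport = vp;	/* publish first... */
	if (alloc_vnp(ln))	/* ...this path dereferences ln->fc_vport */
		return -1;
	return 0;
}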
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -297,7 +297,8 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
 			 lport);
 
 	/* release any threads waiting for the unreg to complete */
-	complete(&lport->lport_unreg_done);
+	if (lport->vport->localport)
+		complete(lport->lport_unreg_cmp);
 }
 
 /* lpfc_nvme_remoteport_delete
@@ -2545,7 +2546,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
  */
 void
 lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
-			   struct lpfc_nvme_lport *lport)
+			   struct lpfc_nvme_lport *lport,
+			   struct completion *lport_unreg_cmp)
 {
 #if (IS_ENABLED(CONFIG_NVME_FC))
 	u32 wait_tmo;
@@ -2557,8 +2559,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
 	 */
 	wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
 	while (true) {
-		ret = wait_for_completion_timeout(&lport->lport_unreg_done,
-						  wait_tmo);
+		ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
 		if (unlikely(!ret)) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
 					 "6176 Lport %p Localport %p wait "
@@ -2592,12 +2593,12 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
 	struct lpfc_nvme_lport *lport;
 	struct lpfc_nvme_ctrl_stat *cstat;
 	int ret;
+	DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
 
 	if (vport->nvmei_support == 0)
 		return;
 
 	localport = vport->localport;
-	vport->localport = NULL;
 	lport = (struct lpfc_nvme_lport *)localport->private;
 	cstat = lport->cstat;
 
@@ -2608,13 +2609,14 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
 	/* lport's rport list is clear.  Unregister
 	 * lport and release resources.
 	 */
-	init_completion(&lport->lport_unreg_done);
+	lport->lport_unreg_cmp = &lport_unreg_cmp;
 	ret = nvme_fc_unregister_localport(localport);
 
 	/* Wait for completion.  This either blocks
 	 * indefinitely or succeeds
 	 */
-	lpfc_nvme_lport_unreg_wait(vport, lport);
+	lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
+	vport->localport = NULL;
 	kfree(cstat);
 
 	/* Regardless of the unregister upcall response, clear
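Both lpfc changes apply the same pattern: a completion embedded in a structure that may be freed is replaced by one living on the waiter's stack, so a late complete() from the NVMe transport can never touch freed driver memory, and the new lport->vport->localport / tport->phba->targetport checks skip the wakeup entirely when nobody is waiting anymore. A minimal sketch of the pattern, using illustrative my_* names rather than lpfc's:

#include <linux/completion.h>
#include <linux/jiffies.h>

struct my_port {
	struct completion *unreg_cmp;	/* pointer, not an embedded struct */
};

/* Transport callback: may run long after my_destroy_port() gave up. */
static void my_port_delete(struct my_port *p)
{
	if (p->unreg_cmp)		/* guard against a stale wakeup */
		complete(p->unreg_cmp);	/* signals the waiter's stack */
}

static void my_destroy_port(struct my_port *p)
{
	DECLARE_COMPLETION_ONSTACK(unreg_cmp);

	p->unreg_cmp = &unreg_cmp;
	/* ...kick off asynchronous unregistration here... */
	wait_for_completion_timeout(&unreg_cmp, msecs_to_jiffies(5000));
	p->unreg_cmp = NULL;	/* a late callback must not signal a dead frame */
}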
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -50,7 +50,7 @@ struct lpfc_nvme_ctrl_stat {
 /* Declare nvme-based local and remote port definitions. */
 struct lpfc_nvme_lport {
 	struct lpfc_vport *vport;
-	struct completion lport_unreg_done;
+	struct completion *lport_unreg_cmp;
 	/* Add stats counters here */
 	struct lpfc_nvme_ctrl_stat *cstat;
 	atomic_t fc4NvmeLsRequests;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1003,7 +1003,8 @@ lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
 	struct lpfc_nvmet_tgtport *tport = targetport->private;
 
 	/* release any threads waiting for the unreg to complete */
-	complete(&tport->tport_unreg_done);
+	if (tport->phba->targetport)
+		complete(tport->tport_unreg_cmp);
 }
 
 static void
@@ -1692,6 +1693,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct lpfc_queue *wq;
 	uint32_t qidx;
+	DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
 
 	if (phba->nvmet_support == 0)
 		return;
@@ -1701,9 +1703,9 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
 			wq = phba->sli4_hba.nvme_wq[qidx];
 			lpfc_nvmet_wqfull_flush(phba, wq, NULL);
 		}
-		init_completion(&tgtp->tport_unreg_done);
+		tgtp->tport_unreg_cmp = &tport_unreg_cmp;
 		nvmet_fc_unregister_targetport(phba->targetport);
-		wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
+		wait_for_completion_timeout(&tport_unreg_cmp, 5);
 		lpfc_nvmet_cleanup_io_context(phba);
 	}
 	phba->targetport = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -34,7 +34,7 @@
 /* Used for NVME Target */
 struct lpfc_nvmet_tgtport {
 	struct lpfc_hba *phba;
-	struct completion tport_unreg_done;
+	struct completion *tport_unreg_cmp;
 
 	/* Stats counters - lpfc_nvmet_unsol_ls_buffer */
 	atomic_t rcv_ls_req_in;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
@@ -1842,8 +1842,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
 	blk_queue_segment_boundary(q, shost->dma_boundary);
 	dma_set_seg_boundary(dev, shost->dma_boundary);
 
-	blk_queue_max_segment_size(q,
-		min(shost->max_segment_size, dma_get_max_seg_size(dev)));
+	blk_queue_max_segment_size(q, shost->max_segment_size);
+	dma_set_max_seg_size(dev, shost->max_segment_size);
 
 	/*
 	 * Set a reasonable default alignment: The larger of 32-byte (dword),
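This hunk is the core of the DMA regression fix: instead of clamping the block-layer limit to whatever dma_get_max_seg_size() happens to report, the SCSI midlayer now pushes the host's declared limit down into the DMA layer, so an IOMMU merging scatterlist entries cannot produce a segment larger than the HBA tolerates. The driver-side hunks above all follow from this: a driver states its limit once in the host template and drops its probe-time dma_set_max_seg_size() call. A sketch with an invented "foo" driver and illustrative values:

#include <linux/module.h>
#include <scsi/scsi_host.h>

/*
 * Illustrative template only. After this series, __scsi_init_queue()
 * applies max_segment_size to both the request queue and the device's
 * DMA parameters; no dma_set_max_seg_size() call is needed in probe.
 */
static struct scsi_host_template foo_sht = {
	.module			= THIS_MODULE,
	.name			= "foo",
	.sg_tablesize		= 128,
	.dma_boundary		= 0xffff,
	.max_segment_size	= 65536 - 512,	/* hardware PRD limit */
};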
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
@@ -108,13 +108,19 @@
 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
 		     const char *prefix)
 {
-	u8 *regs;
+	u32 *regs;
+	size_t pos;
+
+	if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
+		return -EINVAL;
 
 	regs = kzalloc(len, GFP_KERNEL);
 	if (!regs)
 		return -ENOMEM;
 
-	memcpy_fromio(regs, hba->mmio_base + offset, len);
+	for (pos = 0; pos < len; pos += 4)
+		regs[pos / 4] = ufshcd_readl(hba, offset + pos);
+
 	ufshcd_hex_dump(prefix, regs, len);
 	kfree(regs);
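The bug here was access width: memcpy_fromio() gives no control over access size (on some architectures it falls back to byte or 64-bit loads), while UFS host registers are only safe to read as aligned 32-bit words; readl()-based access guarantees that, hence the new offset/len % 4 checks. A standalone sketch of the same idea against a generic MMIO window (the helper name is illustrative, not a kernel API):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical helper: copy a register window using only 32-bit reads,
 * mirroring what the fixed ufshcd_dump_regs() does. */
static int mmio_read_window(void __iomem *base, size_t offset, size_t len,
			    u32 *out)
{
	size_t pos;

	if (offset % 4 != 0 || len % 4 != 0)	/* keep readl() happy */
		return -EINVAL;

	for (pos = 0; pos < len; pos += 4)
		out[pos / 4] = readl(base + offset + pos);

	return 0;
}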
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
@@ -1317,12 +1317,13 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
 		 * target_complete_cmd will translate this to LUN COMM FAILURE
 		 */
 		scsi_status = SAM_STAT_CHECK_CONDITION;
+		list_del_init(&cmd->queue_entry);
 	} else {
+		list_del_init(&cmd->queue_entry);
 		idr_remove(&udev->commands, id);
 		tcmu_free_cmd(cmd);
 		scsi_status = SAM_STAT_TASK_SET_FULL;
 	}
-	list_del_init(&cmd->queue_entry);
 
 	pr_debug("Timing out cmd %u on dev %s that is %s.\n",
 		 id, udev->name, is_running ? "inflight" : "queued");
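The tcmu bug was ordering, not a missing unlink: in the else branch the command was freed by tcmu_free_cmd() and only afterwards did list_del_init() run on cmd->queue_entry, a classic use-after-free; the fix unlinks in each branch before anything is freed. A generic sketch of the corrected ordering (my_* names are illustrative):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_cmd {
	struct list_head queue_entry;
};

static void my_expire_cmd(struct my_cmd *cmd, bool inflight)
{
	if (inflight) {
		list_del_init(&cmd->queue_entry);
		/* completion path still owns cmd and frees it later */
	} else {
		list_del_init(&cmd->queue_entry);	/* unlink first... */
		kfree(cmd);				/* ...then free   */
	}
	/* touching cmd->queue_entry past this point would repeat the bug */
}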