linux/drivers/usb/storage/uas.c
Hans de Goede e2875c3378 uas: Limit qdepth to 32 when connected over usb-2
Some jmicron uas chipsets act up (they disconnect from the bus) when sending
more than 32 commands to them at once.

Rather than building an ever-growing list of usb-id based quirks for
devices using this chipset, simply reduce the qdepth to 32 when connected
over usb-2. 32 should be plenty to keep things close to maximum
possible throughput on usb-2.

Cc: stable@vger.kernel.org
Tested-and-reported-by: Laszlo T. <tlacix@gmail.com>
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-08-01 15:48:08 -07:00


/*
* USB Attached SCSI
* Note that this is not the same as the USB Mass Storage driver
*
* Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013
* Copyright Matthew Wilcox for Intel Corp, 2010
* Copyright Sarah Sharp for Intel Corp, 2010
*
* Distributed under the terms of the GNU GPL, version two.
*/
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb_usual.h>
#include <linux/usb/hcd.h>
#include <linux/usb/storage.h>
#include <linux/usb/uas.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include "uas-detect.h"
/*
* The r00-r01c specs define this version of the SENSE IU data structure.
* It's still in use by several different firmware releases.
*/
struct sense_iu_old {
__u8 iu_id;
__u8 rsvd1;
__be16 tag;
__be16 len;
__u8 status;
__u8 service_response;
__u8 sense[SCSI_SENSE_BUFFERSIZE];
};
struct uas_dev_info {
struct usb_interface *intf;
struct usb_device *udev;
struct usb_anchor cmd_urbs;
struct usb_anchor sense_urbs;
struct usb_anchor data_urbs;
int qdepth, resetting;
struct response_iu response;
unsigned cmd_pipe, status_pipe, data_in_pipe, data_out_pipe;
unsigned use_streams:1;
unsigned uas_sense_old:1;
unsigned running_task:1;
unsigned shutdown:1;
struct scsi_cmnd *cmnd;
spinlock_t lock;
struct work_struct work;
struct list_head inflight_list;
struct list_head dead_list;
};
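/*
 * Per-command state flags, kept in uas_cmd_info.state. The ALLOC_* and
 * SUBMIT_* bits track which URBs still have to be allocated or submitted
 * for the command, the *_INFLIGHT bits track which URBs have been
 * submitted but have not completed yet.
 */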
enum {
SUBMIT_STATUS_URB = (1 << 1),
ALLOC_DATA_IN_URB = (1 << 2),
SUBMIT_DATA_IN_URB = (1 << 3),
ALLOC_DATA_OUT_URB = (1 << 4),
SUBMIT_DATA_OUT_URB = (1 << 5),
ALLOC_CMD_URB = (1 << 6),
SUBMIT_CMD_URB = (1 << 7),
COMMAND_INFLIGHT = (1 << 8),
DATA_IN_URB_INFLIGHT = (1 << 9),
DATA_OUT_URB_INFLIGHT = (1 << 10),
COMMAND_COMPLETED = (1 << 11),
COMMAND_ABORTED = (1 << 12),
UNLINK_DATA_URBS = (1 << 13),
IS_IN_WORK_LIST = (1 << 14),
};
/* Overrides scsi_pointer */
struct uas_cmd_info {
unsigned int state;
unsigned int stream;
struct urb *cmd_urb;
struct urb *data_in_urb;
struct urb *data_out_urb;
struct list_head list;
};
/* I hate forward declarations, but I actually have a loop */
static int uas_submit_urbs(struct scsi_cmnd *cmnd,
struct uas_dev_info *devinfo, gfp_t gfp);
static void uas_do_work(struct work_struct *work);
static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller);
static void uas_free_streams(struct uas_dev_info *devinfo);
static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *caller);
/* Must be called with devinfo->lock held, will temporarily unlock the lock */
static void uas_unlink_data_urbs(struct uas_dev_info *devinfo,
struct uas_cmd_info *cmdinfo,
unsigned long *lock_flags)
{
/*
* The UNLINK_DATA_URBS flag makes sure uas_try_complete
* (called by urb completion) doesn't release cmdinfo
* underneath us.
*/
cmdinfo->state |= UNLINK_DATA_URBS;
spin_unlock_irqrestore(&devinfo->lock, *lock_flags);
if (cmdinfo->data_in_urb)
usb_unlink_urb(cmdinfo->data_in_urb);
if (cmdinfo->data_out_urb)
usb_unlink_urb(cmdinfo->data_out_urb);
spin_lock_irqsave(&devinfo->lock, *lock_flags);
cmdinfo->state &= ~UNLINK_DATA_URBS;
}
static void uas_do_work(struct work_struct *work)
{
struct uas_dev_info *devinfo =
container_of(work, struct uas_dev_info, work);
struct uas_cmd_info *cmdinfo;
unsigned long flags;
int err;
spin_lock_irqsave(&devinfo->lock, flags);
list_for_each_entry(cmdinfo, &devinfo->inflight_list, list) {
struct scsi_pointer *scp = (void *)cmdinfo;
struct scsi_cmnd *cmnd = container_of(scp, struct scsi_cmnd,
SCp);
if (!(cmdinfo->state & IS_IN_WORK_LIST))
continue;
err = uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_ATOMIC);
if (!err)
cmdinfo->state &= ~IS_IN_WORK_LIST;
else
schedule_work(&devinfo->work);
}
spin_unlock_irqrestore(&devinfo->lock, flags);
}
static void uas_mark_cmd_dead(struct uas_dev_info *devinfo,
struct uas_cmd_info *cmdinfo,
int result, const char *caller)
{
struct scsi_pointer *scp = (void *)cmdinfo;
struct scsi_cmnd *cmnd = container_of(scp, struct scsi_cmnd, SCp);
uas_log_cmd_state(cmnd, caller);
WARN_ON_ONCE(!spin_is_locked(&devinfo->lock));
WARN_ON_ONCE(cmdinfo->state & COMMAND_ABORTED);
cmdinfo->state |= COMMAND_ABORTED;
cmdinfo->state &= ~IS_IN_WORK_LIST;
cmnd->result = result << 16;
list_move_tail(&cmdinfo->list, &devinfo->dead_list);
}
static void uas_abort_inflight(struct uas_dev_info *devinfo, int result,
const char *caller)
{
struct uas_cmd_info *cmdinfo;
struct uas_cmd_info *temp;
unsigned long flags;
spin_lock_irqsave(&devinfo->lock, flags);
list_for_each_entry_safe(cmdinfo, temp, &devinfo->inflight_list, list)
uas_mark_cmd_dead(devinfo, cmdinfo, result, caller);
spin_unlock_irqrestore(&devinfo->lock, flags);
}
static void uas_add_work(struct uas_cmd_info *cmdinfo)
{
struct scsi_pointer *scp = (void *)cmdinfo;
struct scsi_cmnd *cmnd = container_of(scp, struct scsi_cmnd, SCp);
struct uas_dev_info *devinfo = cmnd->device->hostdata;
WARN_ON_ONCE(!spin_is_locked(&devinfo->lock));
cmdinfo->state |= IS_IN_WORK_LIST;
schedule_work(&devinfo->work);
}
static void uas_zap_dead(struct uas_dev_info *devinfo)
{
struct uas_cmd_info *cmdinfo;
struct uas_cmd_info *temp;
unsigned long flags;
spin_lock_irqsave(&devinfo->lock, flags);
list_for_each_entry_safe(cmdinfo, temp, &devinfo->dead_list, list) {
struct scsi_pointer *scp = (void *)cmdinfo;
struct scsi_cmnd *cmnd = container_of(scp, struct scsi_cmnd,
SCp);
uas_log_cmd_state(cmnd, __func__);
WARN_ON_ONCE(!(cmdinfo->state & COMMAND_ABORTED));
/* all urbs are killed, clear inflight bits */
cmdinfo->state &= ~(COMMAND_INFLIGHT |
DATA_IN_URB_INFLIGHT |
DATA_OUT_URB_INFLIGHT);
uas_try_complete(cmnd, __func__);
}
devinfo->running_task = 0;
spin_unlock_irqrestore(&devinfo->lock, flags);
}
static void uas_sense(struct urb *urb, struct scsi_cmnd *cmnd)
{
struct sense_iu *sense_iu = urb->transfer_buffer;
struct scsi_device *sdev = cmnd->device;
if (urb->actual_length > 16) {
unsigned len = be16_to_cpup(&sense_iu->len);
if (len + 16 != urb->actual_length) {
int newlen = min(len + 16, urb->actual_length) - 16;
if (newlen < 0)
newlen = 0;
sdev_printk(KERN_INFO, sdev, "%s: urb length %d "
"disagrees with IU sense data length %d, "
"using %d bytes of sense data\n", __func__,
urb->actual_length, len, newlen);
len = newlen;
}
memcpy(cmnd->sense_buffer, sense_iu->sense, len);
}
cmnd->result = sense_iu->status;
}
static void uas_sense_old(struct urb *urb, struct scsi_cmnd *cmnd)
{
struct sense_iu_old *sense_iu = urb->transfer_buffer;
struct scsi_device *sdev = cmnd->device;
if (urb->actual_length > 8) {
unsigned len = be16_to_cpup(&sense_iu->len) - 2;
if (len + 8 != urb->actual_length) {
int newlen = min(len + 8, urb->actual_length) - 8;
if (newlen < 0)
newlen = 0;
sdev_printk(KERN_INFO, sdev, "%s: urb length %d "
"disagrees with IU sense data length %d, "
"using %d bytes of sense data\n", __func__,
urb->actual_length, len, newlen);
len = newlen;
}
memcpy(cmnd->sense_buffer, sense_iu->sense, len);
}
cmnd->result = sense_iu->status;
}
static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *caller)
{
struct uas_cmd_info *ci = (void *)&cmnd->SCp;
scmd_printk(KERN_INFO, cmnd, "%s %p tag %d, inflight:"
"%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
caller, cmnd, cmnd->request->tag,
(ci->state & SUBMIT_STATUS_URB) ? " s-st" : "",
(ci->state & ALLOC_DATA_IN_URB) ? " a-in" : "",
(ci->state & SUBMIT_DATA_IN_URB) ? " s-in" : "",
(ci->state & ALLOC_DATA_OUT_URB) ? " a-out" : "",
(ci->state & SUBMIT_DATA_OUT_URB) ? " s-out" : "",
(ci->state & ALLOC_CMD_URB) ? " a-cmd" : "",
(ci->state & SUBMIT_CMD_URB) ? " s-cmd" : "",
(ci->state & COMMAND_INFLIGHT) ? " CMD" : "",
(ci->state & DATA_IN_URB_INFLIGHT) ? " IN" : "",
(ci->state & DATA_OUT_URB_INFLIGHT) ? " OUT" : "",
(ci->state & COMMAND_COMPLETED) ? " done" : "",
(ci->state & COMMAND_ABORTED) ? " abort" : "",
(ci->state & UNLINK_DATA_URBS) ? " unlink": "",
(ci->state & IS_IN_WORK_LIST) ? " work" : "");
}
static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller)
{
struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
WARN_ON_ONCE(!spin_is_locked(&devinfo->lock));
if (cmdinfo->state & (COMMAND_INFLIGHT |
DATA_IN_URB_INFLIGHT |
DATA_OUT_URB_INFLIGHT |
UNLINK_DATA_URBS))
return -EBUSY;
WARN_ON_ONCE(cmdinfo->state & COMMAND_COMPLETED);
cmdinfo->state |= COMMAND_COMPLETED;
usb_free_urb(cmdinfo->data_in_urb);
usb_free_urb(cmdinfo->data_out_urb);
if (cmdinfo->state & COMMAND_ABORTED)
scmd_printk(KERN_INFO, cmnd, "abort completed\n");
list_del(&cmdinfo->list);
cmnd->scsi_done(cmnd);
return 0;
}
static void uas_xfer_data(struct urb *urb, struct scsi_cmnd *cmnd,
unsigned direction)
{
struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
int err;
cmdinfo->state |= direction | SUBMIT_STATUS_URB;
err = uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_ATOMIC);
if (err) {
uas_add_work(cmdinfo);
}
}
static void uas_stat_cmplt(struct urb *urb)
{
struct iu *iu = urb->transfer_buffer;
struct Scsi_Host *shost = urb->context;
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
struct scsi_cmnd *cmnd;
struct uas_cmd_info *cmdinfo;
unsigned long flags;
u16 tag;
if (urb->status) {
if (urb->status == -ENOENT) {
dev_err(&urb->dev->dev, "stat urb: killed, stream %d\n",
urb->stream_id);
} else {
dev_err(&urb->dev->dev, "stat urb: status %d\n",
urb->status);
}
usb_free_urb(urb);
return;
}
if (devinfo->resetting) {
usb_free_urb(urb);
return;
}
spin_lock_irqsave(&devinfo->lock, flags);
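/*
 * IU tags are 1-based: tag 1 is used for untagged commands (tracked in
 * devinfo->cmnd), tagged commands were sent with request->tag + 2, so
 * subtract 2 in total to get back to the block-layer tag for
 * scsi_host_find_tag().
 */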
tag = be16_to_cpup(&iu->tag) - 1;
if (tag == 0)
cmnd = devinfo->cmnd;
else
cmnd = scsi_host_find_tag(shost, tag - 1);
if (!cmnd) {
if (iu->iu_id == IU_ID_RESPONSE) {
if (!devinfo->running_task)
dev_warn(&urb->dev->dev,
"stat urb: recv unexpected response iu\n");
/* store results for uas_eh_task_mgmt() */
memcpy(&devinfo->response, iu, sizeof(devinfo->response));
}
usb_free_urb(urb);
spin_unlock_irqrestore(&devinfo->lock, flags);
return;
}
cmdinfo = (void *)&cmnd->SCp;
switch (iu->iu_id) {
case IU_ID_STATUS:
if (devinfo->cmnd == cmnd)
devinfo->cmnd = NULL;
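/*
 * A status IU shorter than the 16-byte sense_iu header means the
 * firmware uses the older sense_iu_old layout with an 8-byte header.
 */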
if (urb->actual_length < 16)
devinfo->uas_sense_old = 1;
if (devinfo->uas_sense_old)
uas_sense_old(urb, cmnd);
else
uas_sense(urb, cmnd);
if (cmnd->result != 0) {
/* cancel data transfers on error */
uas_unlink_data_urbs(devinfo, cmdinfo, &flags);
}
cmdinfo->state &= ~COMMAND_INFLIGHT;
uas_try_complete(cmnd, __func__);
break;
case IU_ID_READ_READY:
if (!cmdinfo->data_in_urb ||
(cmdinfo->state & DATA_IN_URB_INFLIGHT)) {
scmd_printk(KERN_ERR, cmnd, "unexpected read rdy\n");
break;
}
uas_xfer_data(urb, cmnd, SUBMIT_DATA_IN_URB);
break;
case IU_ID_WRITE_READY:
if (!cmdinfo->data_out_urb ||
(cmdinfo->state & DATA_OUT_URB_INFLIGHT)) {
scmd_printk(KERN_ERR, cmnd, "unexpected write rdy\n");
break;
}
uas_xfer_data(urb, cmnd, SUBMIT_DATA_OUT_URB);
break;
default:
scmd_printk(KERN_ERR, cmnd,
"Bogus IU (%d) received on status pipe\n", iu->iu_id);
}
usb_free_urb(urb);
spin_unlock_irqrestore(&devinfo->lock, flags);
}
static void uas_data_cmplt(struct urb *urb)
{
struct scsi_cmnd *cmnd = urb->context;
struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
struct scsi_data_buffer *sdb = NULL;
unsigned long flags;
spin_lock_irqsave(&devinfo->lock, flags);
if (cmdinfo->data_in_urb == urb) {
sdb = scsi_in(cmnd);
cmdinfo->state &= ~DATA_IN_URB_INFLIGHT;
} else if (cmdinfo->data_out_urb == urb) {
sdb = scsi_out(cmnd);
cmdinfo->state &= ~DATA_OUT_URB_INFLIGHT;
}
if (sdb == NULL) {
WARN_ON_ONCE(1);
} else if (urb->status) {
if (urb->status != -ECONNRESET) {
uas_log_cmd_state(cmnd, __func__);
scmd_printk(KERN_ERR, cmnd,
"data cmplt err %d stream %d\n",
urb->status, urb->stream_id);
}
/* error: no data transferred */
sdb->resid = sdb->length;
} else {
sdb->resid = sdb->length - urb->actual_length;
}
uas_try_complete(cmnd, __func__);
spin_unlock_irqrestore(&devinfo->lock, flags);
}
static void uas_cmd_cmplt(struct urb *urb)
{
struct scsi_cmnd *cmnd = urb->context;
if (urb->status) {
uas_log_cmd_state(cmnd, __func__);
scmd_printk(KERN_ERR, cmnd, "cmd cmplt err %d\n", urb->status);
}
usb_free_urb(urb);
}
static struct urb *uas_alloc_data_urb(struct uas_dev_info *devinfo, gfp_t gfp,
unsigned int pipe, u16 stream_id,
struct scsi_cmnd *cmnd,
enum dma_data_direction dir)
{
struct usb_device *udev = devinfo->udev;
struct urb *urb = usb_alloc_urb(0, gfp);
struct scsi_data_buffer *sdb = (dir == DMA_FROM_DEVICE)
? scsi_in(cmnd) : scsi_out(cmnd);
if (!urb)
goto out;
usb_fill_bulk_urb(urb, udev, pipe, NULL, sdb->length,
uas_data_cmplt, cmnd);
urb->stream_id = stream_id;
urb->num_sgs = udev->bus->sg_tablesize ? sdb->table.nents : 0;
urb->sg = sdb->table.sgl;
out:
return urb;
}
static struct urb *uas_alloc_sense_urb(struct uas_dev_info *devinfo, gfp_t gfp,
struct Scsi_Host *shost, u16 stream_id)
{
struct usb_device *udev = devinfo->udev;
struct urb *urb = usb_alloc_urb(0, gfp);
struct sense_iu *iu;
if (!urb)
goto out;
iu = kzalloc(sizeof(*iu), gfp);
if (!iu)
goto free;
usb_fill_bulk_urb(urb, udev, devinfo->status_pipe, iu, sizeof(*iu),
uas_stat_cmplt, shost);
urb->stream_id = stream_id;
urb->transfer_flags |= URB_FREE_BUFFER;
out:
return urb;
free:
usb_free_urb(urb);
return NULL;
}
static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
struct scsi_cmnd *cmnd)
{
struct usb_device *udev = devinfo->udev;
struct scsi_device *sdev = cmnd->device;
struct urb *urb = usb_alloc_urb(0, gfp);
struct command_iu *iu;
int len;
if (!urb)
goto out;
len = cmnd->cmd_len - 16;
if (len < 0)
len = 0;
len = ALIGN(len, 4);
iu = kzalloc(sizeof(*iu) + len, gfp);
if (!iu)
goto free;
iu->iu_id = IU_ID_COMMAND;
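/* Tag 1 is reserved for untagged commands; tagged commands use request->tag + 2 */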
if (blk_rq_tagged(cmnd->request))
iu->tag = cpu_to_be16(cmnd->request->tag + 2);
else
iu->tag = cpu_to_be16(1);
iu->prio_attr = UAS_SIMPLE_TAG;
iu->len = len;
int_to_scsilun(sdev->lun, &iu->lun);
memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
usb_fill_bulk_urb(urb, udev, devinfo->cmd_pipe, iu, sizeof(*iu) + len,
uas_cmd_cmplt, cmnd);
urb->transfer_flags |= URB_FREE_BUFFER;
out:
return urb;
free:
usb_free_urb(urb);
return NULL;
}
static int uas_submit_task_urb(struct scsi_cmnd *cmnd, gfp_t gfp,
u8 function, u16 stream_id)
{
struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
struct usb_device *udev = devinfo->udev;
struct urb *urb = usb_alloc_urb(0, gfp);
struct task_mgmt_iu *iu;
int err = -ENOMEM;
if (!urb)
goto err;
iu = kzalloc(sizeof(*iu), gfp);
if (!iu)
goto err;
iu->iu_id = IU_ID_TASK_MGMT;
iu->tag = cpu_to_be16(stream_id);
int_to_scsilun(cmnd->device->lun, &iu->lun);
iu->function = function;
switch (function) {
case TMF_ABORT_TASK:
if (blk_rq_tagged(cmnd->request))
iu->task_tag = cpu_to_be16(cmnd->request->tag + 2);
else
iu->task_tag = cpu_to_be16(1);
break;
}
usb_fill_bulk_urb(urb, udev, devinfo->cmd_pipe, iu, sizeof(*iu),
uas_cmd_cmplt, cmnd);
urb->transfer_flags |= URB_FREE_BUFFER;
usb_anchor_urb(urb, &devinfo->cmd_urbs);
err = usb_submit_urb(urb, gfp);
if (err) {
usb_unanchor_urb(urb);
uas_log_cmd_state(cmnd, __func__);
scmd_printk(KERN_ERR, cmnd, "task submission err %d\n", err);
goto err;
}
return 0;
err:
usb_free_urb(urb);
return err;
}
/*
* Why should I request the Status IU before sending the Command IU? Spec
* says to, but also says the device may receive them in any order. Seems
* daft to me.
*/
static struct urb *uas_submit_sense_urb(struct scsi_cmnd *cmnd,
gfp_t gfp, unsigned int stream)
{
struct Scsi_Host *shost = cmnd->device->host;
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
struct urb *urb;
int err;
urb = uas_alloc_sense_urb(devinfo, gfp, shost, stream);
if (!urb)
return NULL;
usb_anchor_urb(urb, &devinfo->sense_urbs);
err = usb_submit_urb(urb, gfp);
if (err) {
usb_unanchor_urb(urb);
uas_log_cmd_state(cmnd, __func__);
shost_printk(KERN_INFO, shost,
"sense urb submission error %d stream %d\n",
err, stream);
usb_free_urb(urb);
return NULL;
}
return urb;
}
static int uas_submit_urbs(struct scsi_cmnd *cmnd,
struct uas_dev_info *devinfo, gfp_t gfp)
{
struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
struct urb *urb;
int err;
WARN_ON_ONCE(!spin_is_locked(&devinfo->lock));
if (cmdinfo->state & SUBMIT_STATUS_URB) {
urb = uas_submit_sense_urb(cmnd, gfp, cmdinfo->stream);
if (!urb)
return SCSI_MLQUEUE_DEVICE_BUSY;
cmdinfo->state &= ~SUBMIT_STATUS_URB;
}
if (cmdinfo->state & ALLOC_DATA_IN_URB) {
cmdinfo->data_in_urb = uas_alloc_data_urb(devinfo, gfp,
devinfo->data_in_pipe, cmdinfo->stream,
cmnd, DMA_FROM_DEVICE);
if (!cmdinfo->data_in_urb)
return SCSI_MLQUEUE_DEVICE_BUSY;
cmdinfo->state &= ~ALLOC_DATA_IN_URB;
}
if (cmdinfo->state & SUBMIT_DATA_IN_URB) {
usb_anchor_urb(cmdinfo->data_in_urb, &devinfo->data_urbs);
err = usb_submit_urb(cmdinfo->data_in_urb, gfp);
if (err) {
usb_unanchor_urb(cmdinfo->data_in_urb);
uas_log_cmd_state(cmnd, __func__);
scmd_printk(KERN_INFO, cmnd,
"data in urb submission error %d stream %d\n",
err, cmdinfo->data_in_urb->stream_id);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
cmdinfo->state &= ~SUBMIT_DATA_IN_URB;
cmdinfo->state |= DATA_IN_URB_INFLIGHT;
}
if (cmdinfo->state & ALLOC_DATA_OUT_URB) {
cmdinfo->data_out_urb = uas_alloc_data_urb(devinfo, gfp,
devinfo->data_out_pipe, cmdinfo->stream,
cmnd, DMA_TO_DEVICE);
if (!cmdinfo->data_out_urb)
return SCSI_MLQUEUE_DEVICE_BUSY;
cmdinfo->state &= ~ALLOC_DATA_OUT_URB;
}
if (cmdinfo->state & SUBMIT_DATA_OUT_URB) {
usb_anchor_urb(cmdinfo->data_out_urb, &devinfo->data_urbs);
err = usb_submit_urb(cmdinfo->data_out_urb, gfp);
if (err) {
usb_unanchor_urb(cmdinfo->data_out_urb);
uas_log_cmd_state(cmnd, __func__);
scmd_printk(KERN_INFO, cmnd,
"data out urb submission error %d stream %d\n",
err, cmdinfo->data_out_urb->stream_id);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
cmdinfo->state &= ~SUBMIT_DATA_OUT_URB;
cmdinfo->state |= DATA_OUT_URB_INFLIGHT;
}
if (cmdinfo->state & ALLOC_CMD_URB) {
cmdinfo->cmd_urb = uas_alloc_cmd_urb(devinfo, gfp, cmnd);
if (!cmdinfo->cmd_urb)
return SCSI_MLQUEUE_DEVICE_BUSY;
cmdinfo->state &= ~ALLOC_CMD_URB;
}
if (cmdinfo->state & SUBMIT_CMD_URB) {
usb_anchor_urb(cmdinfo->cmd_urb, &devinfo->cmd_urbs);
err = usb_submit_urb(cmdinfo->cmd_urb, gfp);
if (err) {
usb_unanchor_urb(cmdinfo->cmd_urb);
uas_log_cmd_state(cmnd, __func__);
scmd_printk(KERN_INFO, cmnd,
"cmd urb submission error %d\n", err);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
cmdinfo->cmd_urb = NULL;
cmdinfo->state &= ~SUBMIT_CMD_URB;
cmdinfo->state |= COMMAND_INFLIGHT;
}
return 0;
}
static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
void (*done)(struct scsi_cmnd *))
{
struct scsi_device *sdev = cmnd->device;
struct uas_dev_info *devinfo = sdev->hostdata;
struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
unsigned long flags;
int err;
BUILD_BUG_ON(sizeof(struct uas_cmd_info) > sizeof(struct scsi_pointer));
spin_lock_irqsave(&devinfo->lock, flags);
if (devinfo->resetting) {
cmnd->result = DID_ERROR << 16;
cmnd->scsi_done(cmnd);
spin_unlock_irqrestore(&devinfo->lock, flags);
return 0;
}
if (devinfo->cmnd) {
spin_unlock_irqrestore(&devinfo->lock, flags);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
memset(cmdinfo, 0, sizeof(*cmdinfo));
if (blk_rq_tagged(cmnd->request)) {
cmdinfo->stream = cmnd->request->tag + 2;
} else {
devinfo->cmnd = cmnd;
cmdinfo->stream = 1;
}
cmnd->scsi_done = done;
cmdinfo->state = SUBMIT_STATUS_URB |
ALLOC_CMD_URB | SUBMIT_CMD_URB;
switch (cmnd->sc_data_direction) {
case DMA_FROM_DEVICE:
cmdinfo->state |= ALLOC_DATA_IN_URB | SUBMIT_DATA_IN_URB;
break;
case DMA_BIDIRECTIONAL:
cmdinfo->state |= ALLOC_DATA_IN_URB | SUBMIT_DATA_IN_URB;
case DMA_TO_DEVICE:
cmdinfo->state |= ALLOC_DATA_OUT_URB | SUBMIT_DATA_OUT_URB;
case DMA_NONE:
break;
}
if (!devinfo->use_streams) {
cmdinfo->state &= ~(SUBMIT_DATA_IN_URB | SUBMIT_DATA_OUT_URB);
cmdinfo->stream = 0;
}
err = uas_submit_urbs(cmnd, devinfo, GFP_ATOMIC);
if (err) {
/* If we did nothing, give up now */
if (cmdinfo->state & SUBMIT_STATUS_URB) {
spin_unlock_irqrestore(&devinfo->lock, flags);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
uas_add_work(cmdinfo);
}
list_add_tail(&cmdinfo->list, &devinfo->inflight_list);
spin_unlock_irqrestore(&devinfo->lock, flags);
return 0;
}
static DEF_SCSI_QCMD(uas_queuecommand)
static int uas_eh_task_mgmt(struct scsi_cmnd *cmnd,
const char *fname, u8 function)
{
struct Scsi_Host *shost = cmnd->device->host;
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
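/*
 * devinfo->qdepth is the highest valid tag / stream id and is never used
 * by regular commands (their tags run from 1 to qdepth - 1), so it is
 * reserved here for the task management IU.
 */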
u16 tag = devinfo->qdepth;
unsigned long flags;
struct urb *sense_urb;
int result = SUCCESS;
spin_lock_irqsave(&devinfo->lock, flags);
if (devinfo->resetting) {
spin_unlock_irqrestore(&devinfo->lock, flags);
return FAILED;
}
if (devinfo->running_task) {
shost_printk(KERN_INFO, shost,
"%s: %s: error already running a task\n",
__func__, fname);
spin_unlock_irqrestore(&devinfo->lock, flags);
return FAILED;
}
devinfo->running_task = 1;
memset(&devinfo->response, 0, sizeof(devinfo->response));
sense_urb = uas_submit_sense_urb(cmnd, GFP_ATOMIC,
devinfo->use_streams ? tag : 0);
if (!sense_urb) {
shost_printk(KERN_INFO, shost,
"%s: %s: submit sense urb failed\n",
__func__, fname);
devinfo->running_task = 0;
spin_unlock_irqrestore(&devinfo->lock, flags);
return FAILED;
}
if (uas_submit_task_urb(cmnd, GFP_ATOMIC, function, tag)) {
shost_printk(KERN_INFO, shost,
"%s: %s: submit task mgmt urb failed\n",
__func__, fname);
devinfo->running_task = 0;
spin_unlock_irqrestore(&devinfo->lock, flags);
usb_kill_urb(sense_urb);
return FAILED;
}
spin_unlock_irqrestore(&devinfo->lock, flags);
if (usb_wait_anchor_empty_timeout(&devinfo->sense_urbs, 3000) == 0) {
/*
* Note we deliberately do not clear running_task here. If we
* allow new tasks to be submitted, there is no way to figure
* out if a received response_iu is for the failed task or for
* the new one. A bus-reset will eventually clear running_task.
*/
shost_printk(KERN_INFO, shost,
"%s: %s timed out\n", __func__, fname);
return FAILED;
}
spin_lock_irqsave(&devinfo->lock, flags);
devinfo->running_task = 0;
if (be16_to_cpu(devinfo->response.tag) != tag) {
shost_printk(KERN_INFO, shost,
"%s: %s failed (wrong tag %d/%d)\n", __func__,
fname, be16_to_cpu(devinfo->response.tag), tag);
result = FAILED;
} else if (devinfo->response.response_code != RC_TMF_COMPLETE) {
shost_printk(KERN_INFO, shost,
"%s: %s failed (rc 0x%x)\n", __func__,
fname, devinfo->response.response_code);
result = FAILED;
}
spin_unlock_irqrestore(&devinfo->lock, flags);
return result;
}
static int uas_eh_abort_handler(struct scsi_cmnd *cmnd)
{
struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
unsigned long flags;
int ret;
spin_lock_irqsave(&devinfo->lock, flags);
if (devinfo->resetting) {
spin_unlock_irqrestore(&devinfo->lock, flags);
return FAILED;
}
uas_mark_cmd_dead(devinfo, cmdinfo, DID_ABORT, __func__);
if (cmdinfo->state & COMMAND_INFLIGHT) {
spin_unlock_irqrestore(&devinfo->lock, flags);
ret = uas_eh_task_mgmt(cmnd, "ABORT TASK", TMF_ABORT_TASK);
} else {
uas_unlink_data_urbs(devinfo, cmdinfo, &flags);
uas_try_complete(cmnd, __func__);
spin_unlock_irqrestore(&devinfo->lock, flags);
ret = SUCCESS;
}
return ret;
}
static int uas_eh_device_reset_handler(struct scsi_cmnd *cmnd)
{
sdev_printk(KERN_INFO, cmnd->device, "%s\n", __func__);
return uas_eh_task_mgmt(cmnd, "LOGICAL UNIT RESET",
TMF_LOGICAL_UNIT_RESET);
}
static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
{
struct scsi_device *sdev = cmnd->device;
struct uas_dev_info *devinfo = sdev->hostdata;
struct usb_device *udev = devinfo->udev;
int err;
err = usb_lock_device_for_reset(udev, devinfo->intf);
if (err) {
shost_printk(KERN_ERR, sdev->host,
"%s FAILED to get lock err %d\n", __func__, err);
return FAILED;
}
shost_printk(KERN_INFO, sdev->host, "%s start\n", __func__);
devinfo->resetting = 1;
uas_abort_inflight(devinfo, DID_RESET, __func__);
usb_kill_anchored_urbs(&devinfo->cmd_urbs);
usb_kill_anchored_urbs(&devinfo->sense_urbs);
usb_kill_anchored_urbs(&devinfo->data_urbs);
uas_zap_dead(devinfo);
err = usb_reset_device(udev);
devinfo->resetting = 0;
usb_unlock_device(udev);
if (err) {
shost_printk(KERN_INFO, sdev->host, "%s FAILED\n", __func__);
return FAILED;
}
shost_printk(KERN_INFO, sdev->host, "%s success\n", __func__);
return SUCCESS;
}
static int uas_slave_alloc(struct scsi_device *sdev)
{
sdev->hostdata = (void *)sdev->host->hostdata;
/* USB has unusual DMA-alignment requirements: Although the
* starting address of each scatter-gather element doesn't matter,
* the length of each element except the last must be divisible
* by the Bulk maxpacket value. There's currently no way to
* express this by block-layer constraints, so we'll cop out
* and simply require addresses to be aligned at 512-byte
* boundaries. This is okay since most block I/O involves
* hardware sectors that are multiples of 512 bytes in length,
* and since host controllers up through USB 2.0 have maxpacket
* values no larger than 512.
*
* But it doesn't suffice for Wireless USB, where Bulk maxpacket
* values can be as large as 2048. To make that work properly
* will require changes to the block layer.
*/
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
return 0;
}
static int uas_slave_configure(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo = sdev->hostdata;
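/*
 * Two tags are reserved: tag 1 for untagged commands and tag
 * devinfo->qdepth for task management IUs, leaving qdepth - 2 tags for
 * tagged commands.
 */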
scsi_set_tag_type(sdev, MSG_ORDERED_TAG);
scsi_activate_tcq(sdev, devinfo->qdepth - 2);
return 0;
}
static struct scsi_host_template uas_host_template = {
.module = THIS_MODULE,
.name = "uas",
.queuecommand = uas_queuecommand,
.slave_alloc = uas_slave_alloc,
.slave_configure = uas_slave_configure,
.eh_abort_handler = uas_eh_abort_handler,
.eh_device_reset_handler = uas_eh_device_reset_handler,
.eh_bus_reset_handler = uas_eh_bus_reset_handler,
.can_queue = 65536, /* Is there a limit on the _host_ ? */
.this_id = -1,
.sg_tablesize = SG_NONE,
.cmd_per_lun = 1, /* until we override it */
.skip_settle_delay = 1,
.ordered_tag = 1,
};
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
vendorName, productName, useProtocol, useTransport, \
initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
static struct usb_device_id uas_usb_ids[] = {
# include "unusual_uas.h"
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_BULK) },
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_UAS) },
/* 0xaa is a prototype device I happen to have access to */
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, 0xaa) },
{ }
};
MODULE_DEVICE_TABLE(usb, uas_usb_ids);
#undef UNUSUAL_DEV
static int uas_switch_interface(struct usb_device *udev,
struct usb_interface *intf)
{
int alt;
alt = uas_find_uas_alt_setting(intf);
if (alt < 0)
return alt;
return usb_set_interface(udev,
intf->altsetting[0].desc.bInterfaceNumber, alt);
}
static int uas_configure_endpoints(struct uas_dev_info *devinfo)
{
struct usb_host_endpoint *eps[4] = { };
struct usb_device *udev = devinfo->udev;
int r;
devinfo->uas_sense_old = 0;
devinfo->cmnd = NULL;
r = uas_find_endpoints(devinfo->intf->cur_altsetting, eps);
if (r)
return r;
devinfo->cmd_pipe = usb_sndbulkpipe(udev,
usb_endpoint_num(&eps[0]->desc));
devinfo->status_pipe = usb_rcvbulkpipe(udev,
usb_endpoint_num(&eps[1]->desc));
devinfo->data_in_pipe = usb_rcvbulkpipe(udev,
usb_endpoint_num(&eps[2]->desc));
devinfo->data_out_pipe = usb_sndbulkpipe(udev,
usb_endpoint_num(&eps[3]->desc));
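/*
 * Devices connected at less than SuperSpeed cannot use bulk streams, so
 * fall back to a single queue there and limit its depth to 32, as some
 * JMicron UAS bridges disconnect from the bus when more than 32 commands
 * are outstanding over usb-2.
 */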
if (udev->speed != USB_SPEED_SUPER) {
devinfo->qdepth = 32;
devinfo->use_streams = 0;
} else {
devinfo->qdepth = usb_alloc_streams(devinfo->intf, eps + 1,
3, 256, GFP_NOIO);
if (devinfo->qdepth < 0)
return devinfo->qdepth;
devinfo->use_streams = 1;
}
return 0;
}
static void uas_free_streams(struct uas_dev_info *devinfo)
{
struct usb_device *udev = devinfo->udev;
struct usb_host_endpoint *eps[3];
eps[0] = usb_pipe_endpoint(udev, devinfo->status_pipe);
eps[1] = usb_pipe_endpoint(udev, devinfo->data_in_pipe);
eps[2] = usb_pipe_endpoint(udev, devinfo->data_out_pipe);
usb_free_streams(devinfo->intf, eps, 3, GFP_NOIO);
}
static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
int result = -ENOMEM;
struct Scsi_Host *shost = NULL;
struct uas_dev_info *devinfo;
struct usb_device *udev = interface_to_usbdev(intf);
if (!uas_use_uas_driver(intf, id))
return -ENODEV;
if (uas_switch_interface(udev, intf))
return -ENODEV;
shost = scsi_host_alloc(&uas_host_template,
sizeof(struct uas_dev_info));
if (!shost)
goto set_alt0;
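/* A command IU carries a 16-byte CDB plus up to 252 bytes of additional CDB */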
shost->max_cmd_len = 16 + 252;
shost->max_id = 1;
shost->max_lun = 256;
shost->max_channel = 0;
shost->sg_tablesize = udev->bus->sg_tablesize;
devinfo = (struct uas_dev_info *)shost->hostdata;
devinfo->intf = intf;
devinfo->udev = udev;
devinfo->resetting = 0;
devinfo->running_task = 0;
devinfo->shutdown = 0;
init_usb_anchor(&devinfo->cmd_urbs);
init_usb_anchor(&devinfo->sense_urbs);
init_usb_anchor(&devinfo->data_urbs);
spin_lock_init(&devinfo->lock);
INIT_WORK(&devinfo->work, uas_do_work);
INIT_LIST_HEAD(&devinfo->inflight_list);
INIT_LIST_HEAD(&devinfo->dead_list);
result = uas_configure_endpoints(devinfo);
if (result)
goto set_alt0;
result = scsi_init_shared_tag_map(shost, devinfo->qdepth - 2);
if (result)
goto free_streams;
usb_set_intfdata(intf, shost);
result = scsi_add_host(shost, &intf->dev);
if (result)
goto free_streams;
scsi_scan_host(shost);
return result;
free_streams:
uas_free_streams(devinfo);
usb_set_intfdata(intf, NULL);
set_alt0:
usb_set_interface(udev, intf->altsetting[0].desc.bInterfaceNumber, 0);
if (shost)
scsi_host_put(shost);
return result;
}
static int uas_pre_reset(struct usb_interface *intf)
{
struct Scsi_Host *shost = usb_get_intfdata(intf);
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
unsigned long flags;
if (devinfo->shutdown)
return 0;
/* Block new requests */
spin_lock_irqsave(shost->host_lock, flags);
scsi_block_requests(shost);
spin_unlock_irqrestore(shost->host_lock, flags);
/* Wait for any pending requests to complete */
flush_work(&devinfo->work);
if (usb_wait_anchor_empty_timeout(&devinfo->sense_urbs, 5000) == 0) {
shost_printk(KERN_ERR, shost, "%s: timed out\n", __func__);
return 1;
}
uas_free_streams(devinfo);
return 0;
}
static int uas_post_reset(struct usb_interface *intf)
{
struct Scsi_Host *shost = usb_get_intfdata(intf);
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
unsigned long flags;
if (devinfo->shutdown)
return 0;
if (uas_configure_endpoints(devinfo) != 0) {
shost_printk(KERN_ERR, shost,
"%s: alloc streams error after reset", __func__);
return 1;
}
spin_lock_irqsave(shost->host_lock, flags);
scsi_report_bus_reset(shost, 0);
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_unblock_requests(shost);
return 0;
}
static int uas_suspend(struct usb_interface *intf, pm_message_t message)
{
struct Scsi_Host *shost = usb_get_intfdata(intf);
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
/* Wait for any pending requests to complete */
flush_work(&devinfo->work);
if (usb_wait_anchor_empty_timeout(&devinfo->sense_urbs, 5000) == 0) {
shost_printk(KERN_ERR, shost, "%s: timed out\n", __func__);
return -ETIME;
}
return 0;
}
static int uas_resume(struct usb_interface *intf)
{
return 0;
}
static int uas_reset_resume(struct usb_interface *intf)
{
struct Scsi_Host *shost = usb_get_intfdata(intf);
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
unsigned long flags;
if (uas_configure_endpoints(devinfo) != 0) {
shost_printk(KERN_ERR, shost,
"%s: alloc streams error after reset", __func__);
return -EIO;
}
spin_lock_irqsave(shost->host_lock, flags);
scsi_report_bus_reset(shost, 0);
spin_unlock_irqrestore(shost->host_lock, flags);
return 0;
}
static void uas_disconnect(struct usb_interface *intf)
{
struct Scsi_Host *shost = usb_get_intfdata(intf);
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
devinfo->resetting = 1;
cancel_work_sync(&devinfo->work);
uas_abort_inflight(devinfo, DID_NO_CONNECT, __func__);
usb_kill_anchored_urbs(&devinfo->cmd_urbs);
usb_kill_anchored_urbs(&devinfo->sense_urbs);
usb_kill_anchored_urbs(&devinfo->data_urbs);
uas_zap_dead(devinfo);
scsi_remove_host(shost);
uas_free_streams(devinfo);
scsi_host_put(shost);
}
/*
* Put the device back in usb-storage mode on shutdown, as some BIOS-es
* hang on reboot when the device is still in uas mode. Note the reset is
* necessary as some devices won't revert to usb-storage mode without it.
*/
static void uas_shutdown(struct device *dev)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_device *udev = interface_to_usbdev(intf);
struct Scsi_Host *shost = usb_get_intfdata(intf);
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
if (system_state != SYSTEM_RESTART)
return;
devinfo->shutdown = 1;
uas_free_streams(devinfo);
usb_set_interface(udev, intf->altsetting[0].desc.bInterfaceNumber, 0);
usb_reset_device(udev);
}
static struct usb_driver uas_driver = {
.name = "uas",
.probe = uas_probe,
.disconnect = uas_disconnect,
.pre_reset = uas_pre_reset,
.post_reset = uas_post_reset,
.suspend = uas_suspend,
.resume = uas_resume,
.reset_resume = uas_reset_resume,
.drvwrap.driver.shutdown = uas_shutdown,
.id_table = uas_usb_ids,
};
module_usb_driver(uas_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR(
"Hans de Goede <hdegoede@redhat.com>, Matthew Wilcox and Sarah Sharp");