Merge branch '5.15/scsi-fixes' into 5.16/scsi-staging

Merge the 5.15/scsi-fixes branch into the staging tree to resolve UFS
conflict reported by sfr.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Martin K. Petersen 2021-10-12 11:58:12 -04:00
commit ec65e6beb0
40 changed files with 335 additions and 349 deletions


@ -16650,13 +16650,6 @@ M: Lubomir Rintel <lkundrak@v3.sk>
S: Supported S: Supported
F: drivers/char/pcmcia/scr24x_cs.c F: drivers/char/pcmcia/scr24x_cs.c
SCSI CDROM DRIVER
M: Jens Axboe <axboe@kernel.dk>
L: linux-scsi@vger.kernel.org
S: Maintained
W: http://www.kernel.dk
F: drivers/scsi/sr*
SCSI RDMA PROTOCOL (SRP) INITIATOR SCSI RDMA PROTOCOL (SRP) INITIATOR
M: Bart Van Assche <bvanassche@acm.org> M: Bart Van Assche <bvanassche@acm.org>
L: linux-rdma@vger.kernel.org L: linux-rdma@vger.kernel.org


@ -165,13 +165,20 @@ static const struct file_operations bsg_fops = {
.llseek = default_llseek, .llseek = default_llseek,
}; };
static void bsg_device_release(struct device *dev)
{
struct bsg_device *bd = container_of(dev, struct bsg_device, device);
ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
kfree(bd);
}
void bsg_unregister_queue(struct bsg_device *bd) void bsg_unregister_queue(struct bsg_device *bd)
{ {
if (bd->queue->kobj.sd) if (bd->queue->kobj.sd)
sysfs_remove_link(&bd->queue->kobj, "bsg"); sysfs_remove_link(&bd->queue->kobj, "bsg");
cdev_device_del(&bd->cdev, &bd->device); cdev_device_del(&bd->cdev, &bd->device);
ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt)); put_device(&bd->device);
kfree(bd);
} }
EXPORT_SYMBOL_GPL(bsg_unregister_queue); EXPORT_SYMBOL_GPL(bsg_unregister_queue);
@ -193,11 +200,13 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
if (ret < 0) { if (ret < 0) {
if (ret == -ENOSPC) if (ret == -ENOSPC)
dev_err(parent, "bsg: too many bsg devices\n"); dev_err(parent, "bsg: too many bsg devices\n");
goto out_kfree; kfree(bd);
return ERR_PTR(ret);
} }
bd->device.devt = MKDEV(bsg_major, ret); bd->device.devt = MKDEV(bsg_major, ret);
bd->device.class = bsg_class; bd->device.class = bsg_class;
bd->device.parent = parent; bd->device.parent = parent;
bd->device.release = bsg_device_release;
dev_set_name(&bd->device, "%s", name); dev_set_name(&bd->device, "%s", name);
device_initialize(&bd->device); device_initialize(&bd->device);
@ -205,7 +214,7 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
bd->cdev.owner = THIS_MODULE; bd->cdev.owner = THIS_MODULE;
ret = cdev_device_add(&bd->cdev, &bd->device); ret = cdev_device_add(&bd->cdev, &bd->device);
if (ret) if (ret)
goto out_ida_remove; goto out_put_device;
if (q->kobj.sd) { if (q->kobj.sd) {
ret = sysfs_create_link(&q->kobj, &bd->device.kobj, "bsg"); ret = sysfs_create_link(&q->kobj, &bd->device.kobj, "bsg");
@ -217,10 +226,8 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
out_device_del: out_device_del:
cdev_device_del(&bd->cdev, &bd->device); cdev_device_del(&bd->cdev, &bd->device);
out_ida_remove: out_put_device:
ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt)); put_device(&bd->device);
out_kfree:
kfree(bd);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
EXPORT_SYMBOL_GPL(bsg_register_queue); EXPORT_SYMBOL_GPL(bsg_register_queue);
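
The bsg hunks above replace open-coded teardown (ida_simple_remove() plus kfree()) with a struct device release callback and put_device(). The rule they apply: once device_initialize() has run, the object is reference counted and may only be freed from its ->release callback, otherwise a still-open cdev can trigger a use-after-free. A minimal sketch of the pattern, with a hypothetical foo_device standing in for struct bsg_device:

    #include <linux/device.h>
    #include <linux/slab.h>

    struct foo_device {                     /* hypothetical example */
            struct device dev;
    };

    static void foo_release(struct device *dev)
    {
            /* Runs only when the last reference is dropped. */
            kfree(container_of(dev, struct foo_device, dev));
    }

    static struct foo_device *foo_create(void)
    {
            struct foo_device *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

            if (!foo)
                    return NULL;
            device_initialize(&foo->dev);   /* refcount starts at 1 */
            foo->dev.release = foo_release;
            return foo;
    }

    static void foo_destroy(struct foo_device *foo)
    {
            put_device(&foo->dev);          /* never kfree() directly */
    }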


@ -10,17 +10,6 @@ config SCSI_ACORNSCSI_3
This enables support for the Acorn SCSI card (aka30). If you have an This enables support for the Acorn SCSI card (aka30). If you have an
Acorn system with one of these, say Y. If unsure, say N. Acorn system with one of these, say Y. If unsure, say N.
config SCSI_ACORNSCSI_TAGGED_QUEUE
bool "Support SCSI 2 Tagged queueing"
depends on SCSI_ACORNSCSI_3
help
Say Y here to enable tagged queuing support on the Acorn SCSI card.
This is a feature of SCSI-2 which improves performance: the host
adapter can send several SCSI commands to a device's queue even if
previous commands haven't finished yet. Some SCSI devices don't
implement this properly, so the safe answer is N.
config SCSI_ACORNSCSI_SYNC config SCSI_ACORNSCSI_SYNC
bool "Support SCSI 2 Synchronous Transfers" bool "Support SCSI 2 Synchronous Transfers"
depends on SCSI_ACORNSCSI_3 depends on SCSI_ACORNSCSI_3


@ -52,12 +52,8 @@
* You can tell if you have a device that supports tagged queueing by * cat'ing (eg) /proc/scsi/acornscsi/0 and see if the SCSI revision is reported
* cating (eg) /proc/scsi/acornscsi/0 and see if the SCSI revision is reported * cating (eg) /proc/scsi/acornscsi/0 and see if the SCSI revision is reported
* as '2 TAG'. * as '2 TAG'.
*
* Also note that CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE is normally set in the config
* scripts, but disabled here. Once debugged, remove the #undef, otherwise to debug,
* comment out the undef.
*/ */
#undef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
/* /*
* SCSI-II Synchronous transfer support. * SCSI-II Synchronous transfer support.
* *
@ -171,7 +167,7 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
unsigned int result); unsigned int result);
static int acornscsi_reconnect_finish(AS_Host *host); static int acornscsi_reconnect_finish(AS_Host *host);
static void acornscsi_dma_cleanup(AS_Host *host); static void acornscsi_dma_cleanup(AS_Host *host);
static void acornscsi_abortcmd(AS_Host *host, unsigned char tag); static void acornscsi_abortcmd(AS_Host *host);
/* ==================================================================================== /* ====================================================================================
* Miscellaneous * Miscellaneous
@ -741,17 +737,6 @@ intr_ret_t acornscsi_kick(AS_Host *host)
#endif #endif
if (from_queue) { if (from_queue) {
#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
/*
* tagged queueing - allocate a new tag to this command
*/
if (SCpnt->device->simple_tags) {
SCpnt->device->current_tag += 1;
if (SCpnt->device->current_tag == 0)
SCpnt->device->current_tag = 1;
SCpnt->tag = SCpnt->device->current_tag;
} else
#endif
set_bit(SCpnt->device->id * 8 + set_bit(SCpnt->device->id * 8 +
(u8)(SCpnt->device->lun & 0x07), host->busyluns); (u8)(SCpnt->device->lun & 0x07), host->busyluns);
@ -1192,7 +1177,7 @@ void acornscsi_dma_intr(AS_Host *host)
* the device recognises the attention. * the device recognises the attention.
*/ */
if (dmac_read(host, DMAC_STATUS) & STATUS_RQ0) { if (dmac_read(host, DMAC_STATUS) & STATUS_RQ0) {
acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_abortcmd(host);
dmac_write(host, DMAC_TXCNTLO, 0); dmac_write(host, DMAC_TXCNTLO, 0);
dmac_write(host, DMAC_TXCNTHI, 0); dmac_write(host, DMAC_TXCNTHI, 0);
@ -1560,23 +1545,6 @@ void acornscsi_message(AS_Host *host)
acornscsi_sbic_issuecmd(host, CMND_ASSERTATN); acornscsi_sbic_issuecmd(host, CMND_ASSERTATN);
switch (host->scsi.last_message) { switch (host->scsi.last_message) {
#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
case HEAD_OF_QUEUE_TAG:
case ORDERED_QUEUE_TAG:
case SIMPLE_QUEUE_TAG:
/*
* ANSI standard says: (Section SCSI-2 Rev. 10c Sect 5.6.17)
* If a target does not implement tagged queuing and a queue tag
* message is received, it shall respond with a MESSAGE REJECT
* message and accept the I/O process as if it were untagged.
*/
printk(KERN_NOTICE "scsi%d.%c: disabling tagged queueing\n",
host->host->host_no, acornscsi_target(host));
host->SCpnt->device->simple_tags = 0;
set_bit(host->SCpnt->device->id * 8 +
(u8)(host->SCpnt->device->lun & 0x7), host->busyluns);
break;
#endif
case EXTENDED_MESSAGE | (EXTENDED_SDTR << 8): case EXTENDED_MESSAGE | (EXTENDED_SDTR << 8):
/* /*
* Target can't handle synchronous transfers * Target can't handle synchronous transfers
@ -1687,24 +1655,11 @@ void acornscsi_buildmessages(AS_Host *host)
#if 0 #if 0
/* does the device need the current command aborted */ /* does the device need the current command aborted */
if (cmd_aborted) { if (cmd_aborted) {
acornscsi_abortcmd(host->SCpnt->tag); acornscsi_abortcmd(host);
return; return;
} }
#endif #endif
#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
if (host->SCpnt->tag) {
unsigned int tag_type;
if (host->SCpnt->cmnd[0] == REQUEST_SENSE ||
host->SCpnt->cmnd[0] == TEST_UNIT_READY ||
host->SCpnt->cmnd[0] == INQUIRY)
tag_type = HEAD_OF_QUEUE_TAG;
else
tag_type = SIMPLE_QUEUE_TAG;
msgqueue_addmsg(&host->scsi.msgs, 2, tag_type, host->SCpnt->tag);
}
#endif
#ifdef CONFIG_SCSI_ACORNSCSI_SYNC #ifdef CONFIG_SCSI_ACORNSCSI_SYNC
if (host->device[host->SCpnt->device->id].sync_state == SYNC_NEGOCIATE) { if (host->device[host->SCpnt->device->id].sync_state == SYNC_NEGOCIATE) {
@ -1798,7 +1753,7 @@ int acornscsi_reconnect(AS_Host *host)
"to reconnect with\n", "to reconnect with\n",
host->host->host_no, '0' + target); host->host->host_no, '0' + target);
acornscsi_dumplog(host, target); acornscsi_dumplog(host, target);
acornscsi_abortcmd(host, 0); acornscsi_abortcmd(host);
if (host->SCpnt) { if (host->SCpnt) {
queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt); queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt);
host->SCpnt = NULL; host->SCpnt = NULL;
@ -1821,7 +1776,7 @@ int acornscsi_reconnect_finish(AS_Host *host)
host->scsi.disconnectable = 0; host->scsi.disconnectable = 0;
if (host->SCpnt->device->id == host->scsi.reconnected.target && if (host->SCpnt->device->id == host->scsi.reconnected.target &&
host->SCpnt->device->lun == host->scsi.reconnected.lun && host->SCpnt->device->lun == host->scsi.reconnected.lun &&
host->SCpnt->tag == host->scsi.reconnected.tag) { scsi_cmd_to_rq(host->SCpnt)->tag == host->scsi.reconnected.tag) {
#if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON)) #if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON))
DBG(host->SCpnt, printk("scsi%d.%c: reconnected", DBG(host->SCpnt, printk("scsi%d.%c: reconnected",
host->host->host_no, acornscsi_target(host))); host->host->host_no, acornscsi_target(host)));
@ -1848,7 +1803,7 @@ int acornscsi_reconnect_finish(AS_Host *host)
} }
if (!host->SCpnt) if (!host->SCpnt)
acornscsi_abortcmd(host, host->scsi.reconnected.tag); acornscsi_abortcmd(host);
else { else {
/* /*
* Restore data pointer from SAVED pointers. * Restore data pointer from SAVED pointers.
@ -1889,21 +1844,15 @@ void acornscsi_disconnect_unexpected(AS_Host *host)
* Function: void acornscsi_abortcmd(AS_host *host, unsigned char tag) * Function: void acornscsi_abortcmd(AS_host *host, unsigned char tag)
* Purpose : abort a currently executing command * Purpose : abort a currently executing command
* Params : host - host with connected command to abort * Params : host - host with connected command to abort
* tag - tag to abort
*/ */
static static
void acornscsi_abortcmd(AS_Host *host, unsigned char tag) void acornscsi_abortcmd(AS_Host *host)
{ {
host->scsi.phase = PHASE_ABORTED; host->scsi.phase = PHASE_ABORTED;
sbic_arm_write(host, SBIC_CMND, CMND_ASSERTATN); sbic_arm_write(host, SBIC_CMND, CMND_ASSERTATN);
msgqueue_flush(&host->scsi.msgs); msgqueue_flush(&host->scsi.msgs);
#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
if (tag)
msgqueue_addmsg(&host->scsi.msgs, 2, ABORT_TAG, tag);
else
#endif
msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
} }
/* ========================================================================================== /* ==========================================================================================
@ -1993,7 +1942,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTING, SSR %02X?\n", printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTING, SSR %02X?\n",
host->host->host_no, acornscsi_target(host), ssr); host->host->host_no, acornscsi_target(host), ssr);
acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_abortcmd(host);
} }
return INTR_PROCESSING; return INTR_PROCESSING;
@ -2029,7 +1978,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTED, SSR %02X?\n", printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTED, SSR %02X?\n",
host->host->host_no, acornscsi_target(host), ssr); host->host->host_no, acornscsi_target(host), ssr);
acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_abortcmd(host);
} }
return INTR_PROCESSING; return INTR_PROCESSING;
@ -2075,20 +2024,20 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
case 0x18: /* -> PHASE_DATAOUT */ case 0x18: /* -> PHASE_DATAOUT */
/* COMMAND -> DATA OUT */ /* COMMAND -> DATA OUT */
if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len) if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len)
acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_abortcmd(host);
acornscsi_dma_setup(host, DMA_OUT); acornscsi_dma_setup(host, DMA_OUT);
if (!acornscsi_starttransfer(host)) if (!acornscsi_starttransfer(host))
acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_abortcmd(host);
host->scsi.phase = PHASE_DATAOUT; host->scsi.phase = PHASE_DATAOUT;
return INTR_IDLE; return INTR_IDLE;
case 0x19: /* -> PHASE_DATAIN */ case 0x19: /* -> PHASE_DATAIN */
/* COMMAND -> DATA IN */ /* COMMAND -> DATA IN */
if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len) if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len)
acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_abortcmd(host);
acornscsi_dma_setup(host, DMA_IN); acornscsi_dma_setup(host, DMA_IN);
if (!acornscsi_starttransfer(host)) if (!acornscsi_starttransfer(host))
acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_abortcmd(host);
host->scsi.phase = PHASE_DATAIN; host->scsi.phase = PHASE_DATAIN;
return INTR_IDLE; return INTR_IDLE;
@ -2156,7 +2105,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
/* MESSAGE IN -> DATA OUT */ /* MESSAGE IN -> DATA OUT */
acornscsi_dma_setup(host, DMA_OUT); acornscsi_dma_setup(host, DMA_OUT);
if (!acornscsi_starttransfer(host)) if (!acornscsi_starttransfer(host))
acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_abortcmd(host);
host->scsi.phase = PHASE_DATAOUT; host->scsi.phase = PHASE_DATAOUT;
return INTR_IDLE; return INTR_IDLE;
@ -2165,7 +2114,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
/* MESSAGE IN -> DATA IN */ /* MESSAGE IN -> DATA IN */
acornscsi_dma_setup(host, DMA_IN); acornscsi_dma_setup(host, DMA_IN);
if (!acornscsi_starttransfer(host)) if (!acornscsi_starttransfer(host))
acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_abortcmd(host);
host->scsi.phase = PHASE_DATAIN; host->scsi.phase = PHASE_DATAIN;
return INTR_IDLE; return INTR_IDLE;
@ -2206,7 +2155,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
switch (ssr) { switch (ssr) {
case 0x19: /* -> PHASE_DATAIN */ case 0x19: /* -> PHASE_DATAIN */
case 0x89: /* -> PHASE_DATAIN */ case 0x89: /* -> PHASE_DATAIN */
acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_abortcmd(host);
return INTR_IDLE; return INTR_IDLE;
case 0x1b: /* -> PHASE_STATUSIN */ case 0x1b: /* -> PHASE_STATUSIN */
@ -2255,7 +2204,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
switch (ssr) { switch (ssr) {
case 0x18: /* -> PHASE_DATAOUT */ case 0x18: /* -> PHASE_DATAOUT */
case 0x88: /* -> PHASE_DATAOUT */ case 0x88: /* -> PHASE_DATAOUT */
acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_abortcmd(host);
return INTR_IDLE; return INTR_IDLE;
case 0x1b: /* -> PHASE_STATUSIN */ case 0x1b: /* -> PHASE_STATUSIN */
@ -2482,7 +2431,6 @@ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,
SCpnt->scsi_done = done; SCpnt->scsi_done = done;
SCpnt->host_scribble = NULL; SCpnt->host_scribble = NULL;
SCpnt->result = 0; SCpnt->result = 0;
SCpnt->tag = 0;
SCpnt->SCp.phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]); SCpnt->SCp.phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]);
SCpnt->SCp.sent_command = 0; SCpnt->SCp.sent_command = 0;
SCpnt->SCp.scsi_xferred = 0; SCpnt->SCp.scsi_xferred = 0;
@ -2581,7 +2529,7 @@ static enum res_abort acornscsi_do_abort(AS_Host *host, struct scsi_cmnd *SCpnt)
break; break;
default: default:
acornscsi_abortcmd(host, host->SCpnt->tag); acornscsi_abortcmd(host);
res = res_snooze; res = res_snooze;
} }
local_irq_restore(flags); local_irq_restore(flags);
@ -2747,9 +2695,6 @@ char *acornscsi_info(struct Scsi_Host *host)
#ifdef CONFIG_SCSI_ACORNSCSI_SYNC #ifdef CONFIG_SCSI_ACORNSCSI_SYNC
" SYNC" " SYNC"
#endif #endif
#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
" TAG"
#endif
#if (DEBUG & DEBUG_NO_WRITE) #if (DEBUG & DEBUG_NO_WRITE)
" NOWRITE (" __stringify(NO_WRITE) ")" " NOWRITE (" __stringify(NO_WRITE) ")"
#endif #endif
@ -2770,9 +2715,6 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance)
#ifdef CONFIG_SCSI_ACORNSCSI_SYNC #ifdef CONFIG_SCSI_ACORNSCSI_SYNC
" SYNC" " SYNC"
#endif #endif
#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
" TAG"
#endif
#if (DEBUG & DEBUG_NO_WRITE) #if (DEBUG & DEBUG_NO_WRITE)
" NOWRITE (" __stringify(NO_WRITE) ")" " NOWRITE (" __stringify(NO_WRITE) ")"
#endif #endif
@ -2827,9 +2769,8 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance)
seq_printf(m, "Device/Lun TaggedQ Sync\n"); seq_printf(m, "Device/Lun TaggedQ Sync\n");
seq_printf(m, " %d/%llu ", scd->id, scd->lun); seq_printf(m, " %d/%llu ", scd->id, scd->lun);
if (scd->tagged_supported) if (scd->tagged_supported)
seq_printf(m, "%3sabled(%3d) ", seq_printf(m, "%3sabled ",
scd->simple_tags ? "en" : "dis", scd->simple_tags ? "en" : "dis");
scd->current_tag);
else else
seq_printf(m, "unsupported "); seq_printf(m, "unsupported ");
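
The acornscsi hunks above (and the fas216.c and queue.c hunks that follow) drop the driver-maintained current_tag counters and instead read the tag blk-mq already assigned to the request. A short sketch of the accessor this series standardizes on, assuming the 5.15-era scsi_cmd_to_rq() helper:

    #include <scsi/scsi_cmnd.h>

    /* The block layer allocates a unique tag per in-flight request,
     * so drivers no longer invent their own per-device counters. */
    static u32 example_cmd_tag(struct scsi_cmnd *cmd)
    {
            return scsi_cmd_to_rq(cmd)->tag;
    }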


@ -77,7 +77,6 @@
* I was thinking that this was a good chip until I found this restriction ;( * I was thinking that this was a good chip until I found this restriction ;(
*/ */
#define SCSI2_SYNC #define SCSI2_SYNC
#undef SCSI2_TAG
#undef DEBUG_CONNECT #undef DEBUG_CONNECT
#undef DEBUG_MESSAGES #undef DEBUG_MESSAGES
@ -990,7 +989,7 @@ fas216_reselected_intr(FAS216_Info *info)
info->scsi.disconnectable = 0; info->scsi.disconnectable = 0;
if (info->SCpnt->device->id == target && if (info->SCpnt->device->id == target &&
info->SCpnt->device->lun == lun && info->SCpnt->device->lun == lun &&
info->SCpnt->tag == tag) { scsi_cmd_to_rq(info->SCpnt)->tag == tag) {
fas216_log(info, LOG_CONNECT, "reconnected previously executing command"); fas216_log(info, LOG_CONNECT, "reconnected previously executing command");
} else { } else {
queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt); queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt);
@ -1791,8 +1790,9 @@ static void fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt)
/* /*
* add tag message if required * add tag message if required
*/ */
if (SCpnt->tag) if (SCpnt->device->simple_tags)
msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG, SCpnt->tag); msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG,
scsi_cmd_to_rq(SCpnt)->tag);
do { do {
#ifdef SCSI2_SYNC #ifdef SCSI2_SYNC
@ -1815,20 +1815,8 @@ static void fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt)
static void fas216_allocate_tag(FAS216_Info *info, struct scsi_cmnd *SCpnt) static void fas216_allocate_tag(FAS216_Info *info, struct scsi_cmnd *SCpnt)
{ {
#ifdef SCSI2_TAG set_bit(SCpnt->device->id * 8 +
/* (u8)(SCpnt->device->lun & 0x7), info->busyluns);
* tagged queuing - allocate a new tag to this command
*/
if (SCpnt->device->simple_tags && SCpnt->cmnd[0] != REQUEST_SENSE &&
SCpnt->cmnd[0] != INQUIRY) {
SCpnt->device->current_tag += 1;
if (SCpnt->device->current_tag == 0)
SCpnt->device->current_tag = 1;
SCpnt->tag = SCpnt->device->current_tag;
} else
#endif
set_bit(SCpnt->device->id * 8 +
(u8)(SCpnt->device->lun & 0x7), info->busyluns);
info->stats.removes += 1; info->stats.removes += 1;
switch (SCpnt->cmnd[0]) { switch (SCpnt->cmnd[0]) {
@ -2117,7 +2105,6 @@ request_sense:
init_SCp(SCpnt); init_SCp(SCpnt);
SCpnt->SCp.Message = 0; SCpnt->SCp.Message = 0;
SCpnt->SCp.Status = 0; SCpnt->SCp.Status = 0;
SCpnt->tag = 0;
SCpnt->host_scribble = (void *)fas216_rq_sns_done; SCpnt->host_scribble = (void *)fas216_rq_sns_done;
/* /*
@ -2223,7 +2210,6 @@ static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt,
init_SCp(SCpnt); init_SCp(SCpnt);
info->stats.queues += 1; info->stats.queues += 1;
SCpnt->tag = 0;
spin_lock(&info->host_lock); spin_lock(&info->host_lock);
@ -3003,9 +2989,8 @@ void fas216_print_devices(FAS216_Info *info, struct seq_file *m)
dev = &info->device[scd->id]; dev = &info->device[scd->id];
seq_printf(m, " %d/%llu ", scd->id, scd->lun); seq_printf(m, " %d/%llu ", scd->id, scd->lun);
if (scd->tagged_supported) if (scd->tagged_supported)
seq_printf(m, "%3sabled(%3d) ", seq_printf(m, "%3sabled ",
scd->simple_tags ? "en" : "dis", scd->simple_tags ? "en" : "dis");
scd->current_tag);
else else
seq_puts(m, "unsupported "); seq_puts(m, "unsupported ");


@ -214,7 +214,7 @@ struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target, int lun,
list_for_each(l, &queue->head) { list_for_each(l, &queue->head) {
QE_t *q = list_entry(l, QE_t, list); QE_t *q = list_entry(l, QE_t, list);
if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun && if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun &&
q->SCpnt->tag == tag) { scsi_cmd_to_rq(q->SCpnt)->tag == tag) {
SCpnt = __queue_remove(queue, l); SCpnt = __queue_remove(queue, l);
break; break;
} }


@ -1254,3 +1254,4 @@ MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION); MODULE_VERSION(CSIO_DRV_VERSION);
MODULE_FIRMWARE(FW_FNAME_T5); MODULE_FIRMWARE(FW_FNAME_T5);
MODULE_FIRMWARE(FW_FNAME_T6); MODULE_FIRMWARE(FW_FNAME_T6);
MODULE_SOFTDEP("pre: cxgb4");


@ -880,11 +880,11 @@ efct_lio_npiv_drop_nport(struct se_wwn *wwn)
struct efct *efct = lio_vport->efct; struct efct *efct = lio_vport->efct;
unsigned long flags = 0; unsigned long flags = 0;
spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
if (lio_vport->fc_vport) if (lio_vport->fc_vport)
fc_vport_terminate(lio_vport->fc_vport); fc_vport_terminate(lio_vport->fc_vport);
spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
list_for_each_entry_safe(vport, next_vport, &efct->tgt_efct.vport_list, list_for_each_entry_safe(vport, next_vport, &efct->tgt_efct.vport_list,
list_entry) { list_entry) {
if (vport->lio_vport == lio_vport) { if (vport->lio_vport == lio_vport) {


@ -32,7 +32,7 @@ efct_scsi_io_alloc(struct efct_node *node)
struct efct *efct; struct efct *efct;
struct efct_xport *xport; struct efct_xport *xport;
struct efct_io *io; struct efct_io *io;
unsigned long flags = 0; unsigned long flags;
efct = node->efct; efct = node->efct;
@ -42,7 +42,6 @@ efct_scsi_io_alloc(struct efct_node *node)
if (!io) { if (!io) {
efc_log_err(efct, "IO alloc Failed\n"); efc_log_err(efct, "IO alloc Failed\n");
atomic_add_return(1, &xport->io_alloc_failed_count); atomic_add_return(1, &xport->io_alloc_failed_count);
spin_unlock_irqrestore(&node->active_ios_lock, flags);
return NULL; return NULL;
} }


@ -928,22 +928,21 @@ __efc_d_wait_topology_notify(struct efc_sm_ctx *ctx,
break; break;
case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: { case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: {
enum efc_nport_topology topology = enum efc_nport_topology *topology = arg;
(enum efc_nport_topology)arg;
WARN_ON(node->nport->domain->attached); WARN_ON(node->nport->domain->attached);
WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI); WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);
node_printf(node, "topology notification, topology=%d\n", node_printf(node, "topology notification, topology=%d\n",
topology); *topology);
/* At the time the PLOGI was received, the topology was unknown, /* At the time the PLOGI was received, the topology was unknown,
* so we didn't know which node would perform the domain attach: * so we didn't know which node would perform the domain attach:
* 1. The node from which the PLOGI was sent (p2p) or * 1. The node from which the PLOGI was sent (p2p) or
* 2. The node to which the FLOGI was sent (fabric). * 2. The node to which the FLOGI was sent (fabric).
*/ */
if (topology == EFC_NPORT_TOPO_P2P) { if (*topology == EFC_NPORT_TOPO_P2P) {
/* if this is p2p, need to attach to the domain using /* if this is p2p, need to attach to the domain using
* the d_id from the PLOGI received * the d_id from the PLOGI received
*/ */


@ -107,7 +107,6 @@ void
efc_fabric_notify_topology(struct efc_node *node) efc_fabric_notify_topology(struct efc_node *node)
{ {
struct efc_node *tmp_node; struct efc_node *tmp_node;
enum efc_nport_topology topology = node->nport->topology;
unsigned long index; unsigned long index;
/* /*
@ -118,7 +117,7 @@ efc_fabric_notify_topology(struct efc_node *node)
if (tmp_node != node) { if (tmp_node != node) {
efc_node_post_event(tmp_node, efc_node_post_event(tmp_node,
EFC_EVT_NPORT_TOPOLOGY_NOTIFY, EFC_EVT_NPORT_TOPOLOGY_NOTIFY,
(void *)topology); &node->nport->topology);
} }
} }
} }
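
The efc_device.c and efc_fabric.c hunks are two halves of one fix: the producer used to cast the enum value itself to void * and the consumer cast it back, which is fragile and defeats type checking. Passing the address of the nport's topology field gives the handler a real object to read. A reduced, userspace-compilable sketch with hypothetical names:

    #include <stdio.h>

    enum topo { TOPO_UNKNOWN, TOPO_P2P, TOPO_FABRIC };

    static void post_event(void (*handler)(void *arg), void *arg)
    {
            handler(arg);
    }

    static void on_topology_notify(void *arg)
    {
            enum topo *topology = arg;  /* pointer, not a smuggled value */

            if (*topology == TOPO_P2P)
                    printf("p2p: attach domain using d_id from PLOGI\n");
    }

    int main(void)
    {
            enum topo nport_topology = TOPO_P2P;

            post_event(on_topology_notify, &nport_topology);  /* pass & */
            return 0;
    }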


@ -2281,11 +2281,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
return FAILED; return FAILED;
} }
conn = session->leadconn;
iscsi_get_conn(conn->cls_conn);
conn->eh_abort_cnt++;
age = session->age;
spin_lock(&session->back_lock); spin_lock(&session->back_lock);
task = (struct iscsi_task *)sc->SCp.ptr; task = (struct iscsi_task *)sc->SCp.ptr;
if (!task || !task->sc) { if (!task || !task->sc) {
@ -2293,8 +2288,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
ISCSI_DBG_EH(session, "sc completed while abort in progress\n"); ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
spin_unlock(&session->back_lock); spin_unlock(&session->back_lock);
goto success; spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
return SUCCESS;
} }
conn = session->leadconn;
iscsi_get_conn(conn->cls_conn);
conn->eh_abort_cnt++;
age = session->age;
ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt); ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
__iscsi_get_task(task); __iscsi_get_task(task);
spin_unlock(&session->back_lock); spin_unlock(&session->back_lock);


@ -285,11 +285,8 @@ buffer_done:
"6312 Catching potential buffer " "6312 Catching potential buffer "
"overflow > PAGE_SIZE = %lu bytes\n", "overflow > PAGE_SIZE = %lu bytes\n",
PAGE_SIZE); PAGE_SIZE);
strscpy(buf + PAGE_SIZE - 1 - strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR),
strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1), LPFC_INFO_MORE_STR, sizeof(LPFC_INFO_MORE_STR) + 1);
LPFC_INFO_MORE_STR,
strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1)
+ 1);
} }
return len; return len;
} }
@ -6204,7 +6201,8 @@ lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n", len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n",
phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt); phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
len += scnprintf(buf + len, PAGE_SIZE, "Cfg: %d SCSI: %d NVME: %d\n", len += scnprintf(buf + len, PAGE_SIZE - len,
"Cfg: %d SCSI: %d NVME: %d\n",
phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
phba->cfg_nvme_seg_cnt); phba->cfg_nvme_seg_cnt);
return len; return len;
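
Both lpfc_attr.c hunks are the same class of bug: when appending to a sysfs buffer, each scnprintf() must be given the space remaining, not the full PAGE_SIZE. The idiom, reduced to a sketch:

    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
            int len = 0;

            /* PAGE_SIZE - len shrinks as the buffer fills; scnprintf()
             * returns what it actually wrote and never overruns the
             * size it was given. */
            len += scnprintf(buf + len, PAGE_SIZE - len, "line one\n");
            len += scnprintf(buf + len, PAGE_SIZE - len, "line two\n");
            return len;
    }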


@ -4021,11 +4021,11 @@ lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
be32_to_cpu(pcgd->desc_tag), be32_to_cpu(pcgd->desc_tag),
be32_to_cpu(pcgd->desc_len), be32_to_cpu(pcgd->desc_len),
be32_to_cpu(pcgd->xmt_signal_capability), be32_to_cpu(pcgd->xmt_signal_capability),
be32_to_cpu(pcgd->xmt_signal_frequency.count), be16_to_cpu(pcgd->xmt_signal_frequency.count),
be32_to_cpu(pcgd->xmt_signal_frequency.units), be16_to_cpu(pcgd->xmt_signal_frequency.units),
be32_to_cpu(pcgd->rcv_signal_capability), be32_to_cpu(pcgd->rcv_signal_capability),
be32_to_cpu(pcgd->rcv_signal_frequency.count), be16_to_cpu(pcgd->rcv_signal_frequency.count),
be32_to_cpu(pcgd->rcv_signal_frequency.units)); be16_to_cpu(pcgd->rcv_signal_frequency.units));
/* Compare driver and Fport capabilities and choose /* Compare driver and Fport capabilities and choose
* least common. * least common.
@ -9396,7 +9396,7 @@ lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt)
/* Extract the next WWPN from the payload */ /* Extract the next WWPN from the payload */
wwn = *wwnlist++; wwn = *wwnlist++;
wwpn = be64_to_cpu(wwn); wwpn = be64_to_cpu(wwn);
len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ, len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len,
" %016llx", wwpn); " %016llx", wwpn);
/* Log a message if we are on the last WWPN /* Log a message if we are on the last WWPN


@ -1171,7 +1171,7 @@ struct lpfc_mbx_read_object { /* Version 0 */
#define lpfc_mbx_rd_object_rlen_MASK 0x00FFFFFF #define lpfc_mbx_rd_object_rlen_MASK 0x00FFFFFF
#define lpfc_mbx_rd_object_rlen_WORD word0 #define lpfc_mbx_rd_object_rlen_WORD word0
uint32_t rd_object_offset; uint32_t rd_object_offset;
uint32_t rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW]; __le32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW];
#define LPFC_OBJ_NAME_SZ 104 /* 26 x sizeof(uint32_t) is 104. */ #define LPFC_OBJ_NAME_SZ 104 /* 26 x sizeof(uint32_t) is 104. */
uint32_t rd_object_cnt; uint32_t rd_object_cnt;
struct lpfc_mbx_host_buf rd_object_hbuf[4]; struct lpfc_mbx_host_buf rd_object_hbuf[4];


@ -5533,7 +5533,7 @@ lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
if (phba->cgn_fpin_frequency && if (phba->cgn_fpin_frequency &&
phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
cp->cgn_stat_npm = cpu_to_le32(value); cp->cgn_stat_npm = value;
} }
value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
LPFC_CGN_CRC32_SEED); LPFC_CGN_CRC32_SEED);
@ -5562,9 +5562,9 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
uint32_t mbps; uint32_t mbps;
uint32_t dvalue, wvalue, lvalue, avalue; uint32_t dvalue, wvalue, lvalue, avalue;
uint64_t latsum; uint64_t latsum;
uint16_t *ptr; __le16 *ptr;
uint32_t *lptr; __le32 *lptr;
uint16_t *mptr; __le16 *mptr;
/* Make sure we have a congestion info buffer */ /* Make sure we have a congestion info buffer */
if (!phba->cgn_i) if (!phba->cgn_i)
@ -5585,7 +5585,7 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
if (phba->cgn_fpin_frequency && if (phba->cgn_fpin_frequency &&
phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
cp->cgn_stat_npm = cpu_to_le32(value); cp->cgn_stat_npm = value;
} }
/* Read and clear the latency counters for this minute */ /* Read and clear the latency counters for this minute */
@ -5768,7 +5768,7 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]); dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
wvalue += le32_to_cpu(cp->cgn_warn_hr[i]); wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
lvalue += le32_to_cpu(cp->cgn_latency_hr[i]); lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
mbps += le32_to_cpu(cp->cgn_bw_hr[i]); mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
avalue += le32_to_cpu(cp->cgn_alarm_hr[i]); avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
} }
if (lvalue) /* Avg of latency averages */ if (lvalue) /* Avg of latency averages */
@ -8304,11 +8304,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
return 0; return 0;
out_free_hba_hdwq_info: out_free_hba_hdwq_info:
free_percpu(phba->sli4_hba.c_stat);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
free_percpu(phba->sli4_hba.c_stat);
out_free_hba_idle_stat: out_free_hba_idle_stat:
kfree(phba->sli4_hba.idle_stat);
#endif #endif
kfree(phba->sli4_hba.idle_stat);
out_free_hba_eq_info: out_free_hba_eq_info:
free_percpu(phba->sli4_hba.eq_info); free_percpu(phba->sli4_hba.eq_info);
out_free_hba_cpu_map: out_free_hba_cpu_map:
@ -13447,8 +13447,8 @@ lpfc_init_congestion_buf(struct lpfc_hba *phba)
/* last used Index initialized to 0xff already */ /* last used Index initialized to 0xff already */
cp->cgn_warn_freq = LPFC_FPIN_INIT_FREQ; cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
cp->cgn_alarm_freq = LPFC_FPIN_INIT_FREQ; cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
cp->cgn_info_crc = cpu_to_le32(crc); cp->cgn_info_crc = cpu_to_le32(crc);
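
The lpfc_hw4.h and lpfc_init.c hunks together tighten wire-format typing: fields that are little-endian on the wire become __le16/__le32 so sparse can flag a mismatched accessor, such as le32_to_cpu() applied to a 16-bit field. A reduced sketch with a hypothetical layout:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct cgn_example {            /* hypothetical reduced layout */
            __le16 bw_hr;           /* 16-bit little-endian field */
            __le32 latency_hr;      /* 32-bit little-endian field */
    };

    static u32 read_fields(const struct cgn_example *cp)
    {
            /* Accessor width must match the field width; sparse
             * reports any __le16/__le32 mismatch. */
            return le16_to_cpu(cp->bw_hr) + le32_to_cpu(cp->latency_hr);
    }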


@ -1492,9 +1492,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_qhandle *lpfc_queue_info; struct lpfc_nvme_qhandle *lpfc_queue_info;
struct lpfc_nvme_fcpreq_priv *freqpriv; struct lpfc_nvme_fcpreq_priv *freqpriv;
struct nvme_common_command *sqe; struct nvme_common_command *sqe;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint64_t start = 0; uint64_t start = 0;
#endif
/* Validate pointers. LLDD fault handling with transport does /* Validate pointers. LLDD fault handling with transport does
* have timing races. * have timing races.


@ -1514,7 +1514,6 @@ static int
lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
uint8_t *txop, uint8_t *rxop) uint8_t *txop, uint8_t *rxop)
{ {
uint8_t ret = 0;
if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) { if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
switch (scsi_get_prot_op(sc)) { switch (scsi_get_prot_op(sc)) {
@ -1567,7 +1566,7 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
} }
} }
return ret; return 0;
} }
#endif #endif
@ -5598,12 +5597,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
int err, idx; int err, idx;
u8 *uuid = NULL; u8 *uuid = NULL;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS uint64_t start;
uint64_t start = 0L;
if (phba->ktime_on)
start = ktime_get_ns();
#endif
start = ktime_get_ns(); start = ktime_get_ns();
rdata = lpfc_rport_data_from_scsi_device(cmnd->device); rdata = lpfc_rport_data_from_scsi_device(cmnd->device);


@ -12308,12 +12308,12 @@ void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb) struct lpfc_iocbq *rspiocb)
{ {
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; struct lpfc_nodelist *ndlp = NULL;
IOCB_t *irsp = &rspiocb->iocb; IOCB_t *irsp = &rspiocb->iocb;
/* ELS cmd tag <ulpIoTag> completes */ /* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS, lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"0139 Ignoring ELS cmd tag x%x completion Data: " "0139 Ignoring ELS cmd code x%x completion Data: "
"x%x x%x x%x\n", "x%x x%x x%x\n",
irsp->ulpIoTag, irsp->ulpStatus, irsp->ulpIoTag, irsp->ulpStatus,
irsp->un.ulpWord[4], irsp->ulpTimeout); irsp->un.ulpWord[4], irsp->ulpTimeout);
@ -12321,10 +12321,13 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
* if exchange is busy. * if exchange is busy.
*/ */
if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
ndlp = cmdiocb->context_un.ndlp;
lpfc_ct_free_iocb(phba, cmdiocb); lpfc_ct_free_iocb(phba, cmdiocb);
else } else {
ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
lpfc_els_free_iocb(phba, cmdiocb); lpfc_els_free_iocb(phba, cmdiocb);
}
lpfc_nlp_put(ndlp); lpfc_nlp_put(ndlp);
} }
@ -22186,6 +22189,7 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
uint32_t shdr_status, shdr_add_status; uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr; union lpfc_sli4_cfg_shdr *shdr;
struct lpfc_dmabuf *pcmd; struct lpfc_dmabuf *pcmd;
u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
/* sanity check on queue memory */ /* sanity check on queue memory */
if (!datap) if (!datap)
@ -22209,10 +22213,10 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
memset((void *)read_object->u.request.rd_object_name, 0, memset((void *)read_object->u.request.rd_object_name, 0,
LPFC_OBJ_NAME_SZ); LPFC_OBJ_NAME_SZ);
sprintf((uint8_t *)read_object->u.request.rd_object_name, rdobject); scnprintf((char *)rd_object_name, sizeof(rd_object_name), rdobject);
for (j = 0; j < strlen(rdobject); j++) for (j = 0; j < strlen(rdobject); j++)
read_object->u.request.rd_object_name[j] = read_object->u.request.rd_object_name[j] =
cpu_to_le32(read_object->u.request.rd_object_name[j]); cpu_to_le32(rd_object_name[j]);
pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL); pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
if (pcmd) if (pcmd)


@ -1916,7 +1916,7 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
raid = MR_LdRaidGet(ld, local_map_ptr); raid = MR_LdRaidGet(ld, local_map_ptr);
if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
blk_queue_update_dma_alignment(sdev->request_queue, 0x7); blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
mr_device_priv_data->is_tm_capable = mr_device_priv_data->is_tm_capable =
raid->capability.tmCapable; raid->capability.tmCapable;
@ -8033,7 +8033,7 @@ skip_firing_dcmds:
if (instance->adapter_type != MFI_SERIES) { if (instance->adapter_type != MFI_SERIES) {
megasas_release_fusion(instance); megasas_release_fusion(instance);
pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
(sizeof(struct MR_PD_CFG_SEQ) * (sizeof(struct MR_PD_CFG_SEQ) *
(MAX_PHYSICAL_DEVICES - 1)); (MAX_PHYSICAL_DEVICES - 1));
for (i = 0; i < 2 ; i++) { for (i = 0; i < 2 ; i++) {
@ -8773,8 +8773,7 @@ int megasas_update_device_list(struct megasas_instance *instance,
if (event_type & SCAN_VD_CHANNEL) { if (event_type & SCAN_VD_CHANNEL) {
if (!instance->requestorId || if (!instance->requestorId ||
(instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)) {
megasas_get_ld_vf_affiliation(instance, 0))) {
dcmd_ret = megasas_ld_list_query(instance, dcmd_ret = megasas_ld_list_query(instance,
MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
if (dcmd_ret != DCMD_SUCCESS) if (dcmd_ret != DCMD_SUCCESS)


@ -1582,8 +1582,10 @@ mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc)
* wait for current poll to complete. * wait for current poll to complete.
*/ */
for (qid = 0; qid < iopoll_q_count; qid++) { for (qid = 0; qid < iopoll_q_count; qid++) {
while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) {
cpu_relax();
udelay(500); udelay(500);
}
} }
} }
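
The mpt3sas_base.c hunk adds cpu_relax() to the polling loop so the busy-wait yields the CPU pipeline between 500us delays. The corrected shape, as a sketch:

    #include <linux/atomic.h>
    #include <linux/delay.h>
    #include <asm/processor.h>

    /* Spin politely until the poll queue goes idle. */
    static void example_wait_idle(atomic_t *busy)
    {
            while (atomic_read(busy)) {
                    cpu_relax();    /* hint: we are in a spin loop */
                    udelay(500);
            }
    }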


@ -2178,7 +2178,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
mpt3sas_check_cmd_timeout(ioc, mpt3sas_check_cmd_timeout(ioc,
ioc->ctl_cmds.status, mpi_request, ioc->ctl_cmds.status, mpi_request,
sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed); sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed);
*issue_reset = reset_needed; *issue_reset = reset_needed;
rc = -EFAULT; rc = -EFAULT;
goto out; goto out;
} }


@ -10749,8 +10749,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
_scsih_pcie_topology_change_event(ioc, fw_event); _scsih_pcie_topology_change_event(ioc, fw_event);
ioc->current_event = NULL; ioc->current_event = NULL;
return; return;
break;
} }
out: out:
fw_event_work_put(fw_event); fw_event_work_put(fw_event);


@ -1939,11 +1939,8 @@ static void ncr_start_next_ccb (struct ncb *np, struct lcb * lp, int maxn);
static void ncr_put_start_queue(struct ncb *np, struct ccb *cp); static void ncr_put_start_queue(struct ncb *np, struct ccb *cp);
static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd); static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd);
static struct scsi_cmnd *retrieve_from_waiting_list(int to_remove, struct ncb *np, struct scsi_cmnd *cmd);
static void process_waiting_list(struct ncb *np, int sts); static void process_waiting_list(struct ncb *np, int sts);
#define remove_from_waiting_list(np, cmd) \
retrieve_from_waiting_list(1, (np), (cmd))
#define requeue_waiting_list(np) process_waiting_list((np), DID_OK) #define requeue_waiting_list(np) process_waiting_list((np), DID_OK)
#define reset_waiting_list(np) process_waiting_list((np), DID_RESET) #define reset_waiting_list(np) process_waiting_list((np), DID_RESET)
@ -7997,26 +7994,6 @@ static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd)
} }
} }
static struct scsi_cmnd *retrieve_from_waiting_list(int to_remove, struct ncb *np, struct scsi_cmnd *cmd)
{
struct scsi_cmnd **pcmd = &np->waiting_list;
while (*pcmd) {
if (cmd == *pcmd) {
if (to_remove) {
*pcmd = (struct scsi_cmnd *) cmd->next_wcmd;
cmd->next_wcmd = NULL;
}
#ifdef DEBUG_WAITING_LIST
printk("%s: cmd %lx retrieved from waiting list\n", ncr_name(np), (u_long) cmd);
#endif
return cmd;
}
pcmd = (struct scsi_cmnd **) &(*pcmd)->next_wcmd;
}
return NULL;
}
static void process_waiting_list(struct ncb *np, int sts) static void process_waiting_list(struct ncb *np, int sts)
{ {
struct scsi_cmnd *waiting_list, *wcmd; struct scsi_cmnd *waiting_list, *wcmd;


@ -7170,7 +7170,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
return 0; return 0;
break; break;
case QLA2XXX_INI_MODE_DUAL: case QLA2XXX_INI_MODE_DUAL:
if (!qla_dual_mode_enabled(vha)) if (!qla_dual_mode_enabled(vha) &&
!qla_ini_mode_enabled(vha))
return 0; return 0;
break; break;
case QLA2XXX_INI_MODE_ENABLED: case QLA2XXX_INI_MODE_ENABLED:


@ -2634,7 +2634,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
} }
if (unlikely(logit)) if (unlikely(logit))
ql_log(ql_log_warn, fcport->vha, 0x5060, ql_log(ql_dbg_io, fcport->vha, 0x5060,
"NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n", "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
sp->name, sp->handle, comp_status, sp->name, sp->handle, comp_status,
fd->transferred_length, le32_to_cpu(sts->residual_len), fd->transferred_length, le32_to_cpu(sts->residual_len),
@ -3491,7 +3491,7 @@ check_scsi_status:
out: out:
if (logit) if (logit)
ql_log(ql_log_warn, fcport->vha, 0x3022, ql_log(ql_dbg_io, fcport->vha, 0x3022,
"FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
comp_status, scsi_status, res, vha->host_no, comp_status, scsi_status, res, vha->host_no,
cp->device->id, cp->device->lun, fcport->d_id.b.domain, cp->device->id, cp->device->lun, fcport->d_id.b.domain,


@ -441,9 +441,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
struct iscsi_transport *t = iface->transport; struct iscsi_transport *t = iface->transport;
int param = -1; int param = -1;
if (attr == &dev_attr_iface_enabled.attr) if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
param = ISCSI_NET_PARAM_IFACE_ENABLE;
else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO; param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO;
else if (attr == &dev_attr_iface_header_digest.attr) else if (attr == &dev_attr_iface_header_digest.attr)
param = ISCSI_IFACE_PARAM_HDRDGST_EN; param = ISCSI_IFACE_PARAM_HDRDGST_EN;
@ -483,7 +481,9 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
if (param != -1) if (param != -1)
return t->attr_is_visible(ISCSI_IFACE_PARAM, param); return t->attr_is_visible(ISCSI_IFACE_PARAM, param);
if (attr == &dev_attr_iface_vlan_id.attr) if (attr == &dev_attr_iface_enabled.attr)
param = ISCSI_NET_PARAM_IFACE_ENABLE;
else if (attr == &dev_attr_iface_vlan_id.attr)
param = ISCSI_NET_PARAM_VLAN_ID; param = ISCSI_NET_PARAM_VLAN_ID;
else if (attr == &dev_attr_iface_vlan_priority.attr) else if (attr == &dev_attr_iface_vlan_priority.attr)
param = ISCSI_NET_PARAM_VLAN_PRIORITY; param = ISCSI_NET_PARAM_VLAN_PRIORITY;


@ -2124,6 +2124,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
retries = 0; retries = 0;
do { do {
bool media_was_present = sdkp->media_present;
cmd[0] = TEST_UNIT_READY; cmd[0] = TEST_UNIT_READY;
memset((void *) &cmd[1], 0, 9); memset((void *) &cmd[1], 0, 9);
@ -2138,7 +2140,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
* with any more polling. * with any more polling.
*/ */
if (media_not_present(sdkp, &sshdr)) { if (media_not_present(sdkp, &sshdr)) {
sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n"); if (media_was_present)
sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
return; return;
} }
@ -3408,15 +3411,16 @@ static int sd_probe(struct device *dev)
} }
device_initialize(&sdkp->dev); device_initialize(&sdkp->dev);
sdkp->dev.parent = dev; sdkp->dev.parent = get_device(dev);
sdkp->dev.class = &sd_disk_class; sdkp->dev.class = &sd_disk_class;
dev_set_name(&sdkp->dev, "%s", dev_name(dev)); dev_set_name(&sdkp->dev, "%s", dev_name(dev));
error = device_add(&sdkp->dev); error = device_add(&sdkp->dev);
if (error) if (error) {
goto out_free_index; put_device(&sdkp->dev);
goto out;
}
get_device(dev);
dev_set_drvdata(dev, sdkp); dev_set_drvdata(dev, sdkp);
gd->major = sd_major((index & 0xf0) >> 4); gd->major = sd_major((index & 0xf0) >> 4);
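
The sd_probe() hunk applies the same device-model error rule seen in the bsg change above: after device_initialize(), a failed device_add() must be unwound with put_device() so cleanup goes through the release callback, and the parent is pinned with get_device() for the lifetime of the child. Reduced to its shape (names from the hunk):

    device_initialize(&sdkp->dev);
    sdkp->dev.parent = get_device(dev);     /* hold the parent */

    error = device_add(&sdkp->dev);
    if (error) {
            put_device(&sdkp->dev);         /* unwind via ->release */
            goto out;
    }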


@ -154,8 +154,8 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
/* /*
* Report zone buffer size should be at most 64B times the number of * Report zone buffer size should be at most 64B times the number of
* zones requested plus the 64B reply header, but should be at least * zones requested plus the 64B reply header, but should be aligned
* SECTOR_SIZE for ATA devices. * to SECTOR_SIZE for ATA devices.
* Make sure that this size does not exceed the hardware capabilities. * Make sure that this size does not exceed the hardware capabilities.
* Furthermore, since the report zone command cannot be split, make * Furthermore, since the report zone command cannot be split, make
* sure that the allocated buffer can always be mapped by limiting the * sure that the allocated buffer can always be mapped by limiting the
@ -174,7 +174,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
*buflen = bufsize; *buflen = bufsize;
return buf; return buf;
} }
bufsize >>= 1; bufsize = rounddown(bufsize >> 1, SECTOR_SIZE);
} }
return NULL; return NULL;
@ -280,7 +280,7 @@ static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
{ {
struct scsi_disk *sdkp; struct scsi_disk *sdkp;
unsigned long flags; unsigned long flags;
unsigned int zno; sector_t zno;
int ret; int ret;
sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work); sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);
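
The sd_zbc.c buffer hunk keeps the report-zones allocation SECTOR_SIZE-aligned on every halving step, since ATA devices need sector-aligned report buffers and a plain bufsize >>= 1 can produce an unaligned size. The loop, as a reduced sketch with error handling elided:

    while (bufsize >= SECTOR_SIZE) {
            buf = __vmalloc(bufsize,
                            GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY);
            if (buf) {
                    *buflen = bufsize;
                    return buf;
            }
            /* halve, then round back down to a sector multiple */
            bufsize = rounddown(bufsize >> 1, SECTOR_SIZE);
    }
    return NULL;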


@ -87,9 +87,16 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
0 0
}; };
unsigned char recv_page_code; unsigned char recv_page_code;
unsigned int retries = SES_RETRIES;
struct scsi_sense_hdr sshdr;
do {
ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
&sshdr, SES_TIMEOUT, 1, NULL);
} while (ret > 0 && --retries && scsi_sense_valid(&sshdr) &&
(sshdr.sense_key == NOT_READY ||
(sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
NULL, SES_TIMEOUT, SES_RETRIES, NULL);
if (unlikely(ret)) if (unlikely(ret))
return ret; return ret;
@ -111,7 +118,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
static int ses_send_diag(struct scsi_device *sdev, int page_code, static int ses_send_diag(struct scsi_device *sdev, int page_code,
void *buf, int bufflen) void *buf, int bufflen)
{ {
u32 result; int result;
unsigned char cmd[] = { unsigned char cmd[] = {
SEND_DIAGNOSTIC, SEND_DIAGNOSTIC,
@ -121,9 +128,16 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code,
bufflen & 0xff, bufflen & 0xff,
0 0
}; };
struct scsi_sense_hdr sshdr;
unsigned int retries = SES_RETRIES;
do {
result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
&sshdr, SES_TIMEOUT, 1, NULL);
} while (result > 0 && --retries && scsi_sense_valid(&sshdr) &&
(sshdr.sense_key == NOT_READY ||
(sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
NULL, SES_TIMEOUT, SES_RETRIES, NULL);
if (result) if (result)
sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n", sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n",
result); result);


@ -523,7 +523,7 @@ static int sr_read_sector(Scsi_CD *cd, int lba, int blksize, unsigned char *dest
return rc; return rc;
cd->readcd_known = 0; cd->readcd_known = 0;
sr_printk(KERN_INFO, cd, sr_printk(KERN_INFO, cd,
"CDROM does'nt support READ CD (0xbe) command\n"); "CDROM doesn't support READ CD (0xbe) command\n");
/* fall & retry the other way */ /* fall & retry the other way */
} }
/* ... if this fails, we switch the blocksize using MODE SELECT */ /* ... if this fails, we switch the blocksize using MODE SELECT */


@ -3823,6 +3823,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
case CDROM_SEND_PACKET: case CDROM_SEND_PACKET:
if (!capable(CAP_SYS_RAWIO)) if (!capable(CAP_SYS_RAWIO))
return -EPERM; return -EPERM;
break;
default: default:
break; break;
} }


@ -128,6 +128,81 @@ static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
return err; return err;
} }
static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
{
struct ufs_pa_layer_attr pwr_info = hba->pwr_info;
int ret;
pwr_info.lane_rx = lanes;
pwr_info.lane_tx = lanes;
ret = ufshcd_config_pwr_mode(hba, &pwr_info);
if (ret)
dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
__func__, lanes, ret);
return ret;
}
static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
int err = 0;
switch (status) {
case PRE_CHANGE:
if (ufshcd_is_hs_mode(dev_max_params) &&
(hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
ufs_intel_set_lanes(hba, 2);
memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
break;
case POST_CHANGE:
if (ufshcd_is_hs_mode(dev_req_params)) {
u32 peer_granularity;
usleep_range(1000, 1250);
err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
&peer_granularity);
}
break;
default:
break;
}
return err;
}
static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
{
u32 granularity, peer_granularity;
u32 pa_tactivate, peer_pa_tactivate;
int ret;
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
if (ret)
goto out;
ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
if (ret)
goto out;
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
if (ret)
goto out;
ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
if (ret)
goto out;
if (granularity == peer_granularity) {
u32 new_peer_pa_tactivate = pa_tactivate + 2;
ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);
}
out:
return ret;
}
#define INTEL_ACTIVELTR 0x804 #define INTEL_ACTIVELTR 0x804
#define INTEL_IDLELTR 0x808 #define INTEL_IDLELTR 0x808
@ -351,6 +426,7 @@ static int ufs_intel_lkf_init(struct ufs_hba *hba)
struct ufs_host *ufs_host; struct ufs_host *ufs_host;
int err; int err;
hba->nop_out_timeout = 200;
hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
hba->caps |= UFSHCD_CAP_CRYPTO; hba->caps |= UFSHCD_CAP_CRYPTO;
err = ufs_intel_common_init(hba); err = ufs_intel_common_init(hba);
@ -381,6 +457,8 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
.exit = ufs_intel_common_exit, .exit = ufs_intel_common_exit,
.hce_enable_notify = ufs_intel_hce_enable_notify, .hce_enable_notify = ufs_intel_hce_enable_notify,
.link_startup_notify = ufs_intel_link_startup_notify, .link_startup_notify = ufs_intel_link_startup_notify,
.pwr_change_notify = ufs_intel_lkf_pwr_change_notify,
.apply_dev_quirks = ufs_intel_lkf_apply_dev_quirks,
.resume = ufs_intel_resume, .resume = ufs_intel_resume,
.device_reset = ufs_intel_device_reset, .device_reset = ufs_intel_device_reset,
}; };


@ -17,8 +17,6 @@
#include <linux/blk-pm.h> #include <linux/blk-pm.h>
#include <linux/blkdev.h> #include <linux/blkdev.h>
#include <scsi/scsi_driver.h> #include <scsi/scsi_driver.h>
#include <scsi/scsi_transport.h>
#include "../scsi_transport_api.h"
#include "ufshcd.h" #include "ufshcd.h"
#include "ufs_quirks.h" #include "ufs_quirks.h"
#include "unipro.h" #include "unipro.h"
@ -238,6 +236,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba); static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba, static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode); struct ufs_pa_layer_attr *pwr_mode);
static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on); static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on); static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
@ -320,8 +319,7 @@ static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag, static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
enum ufs_trace_str_t str_t) enum ufs_trace_str_t str_t)
{ {
int off = (int)tag - hba->nutrs; struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
if (!trace_ufshcd_upiu_enabled()) if (!trace_ufshcd_upiu_enabled())
return; return;
@ -2760,8 +2758,13 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
out: out:
up_read(&hba->clk_scaling_lock); up_read(&hba->clk_scaling_lock);
if (ufs_trigger_eh()) if (ufs_trigger_eh()) {
scsi_schedule_eh(hba->host); unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_schedule_eh_work(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
return err; return err;
} }
@@ -3920,35 +3923,6 @@ out:
 }
 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
 
-static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
-{
-        lockdep_assert_held(hba->host->host_lock);
-
-        return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
-               (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
-}
-
-static void ufshcd_schedule_eh(struct ufs_hba *hba)
-{
-        bool schedule_eh = false;
-        unsigned long flags;
-
-        spin_lock_irqsave(hba->host->host_lock, flags);
-        /* handle fatal errors only when link is not in error state */
-        if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
-                if (hba->force_reset || ufshcd_is_link_broken(hba) ||
-                    ufshcd_is_saved_err_fatal(hba))
-                        hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
-                else
-                        hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
-                schedule_eh = true;
-        }
-        spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-        if (schedule_eh)
-                scsi_schedule_eh(hba->host);
-}
-
 /**
  * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
  * state) and waits for it to take effect.
@@ -3969,7 +3943,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 {
         DECLARE_COMPLETION_ONSTACK(uic_async_done);
         unsigned long flags;
-        bool schedule_eh = false;
         u8 status;
         int ret;
         bool reenable_intr = false;
@@ -4039,14 +4012,10 @@ out:
                 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
         if (ret) {
                 ufshcd_set_link_broken(hba);
-                schedule_eh = true;
+                ufshcd_schedule_eh_work(hba);
         }
 out_unlock:
         spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-        if (schedule_eh)
-                ufshcd_schedule_eh(hba);
-
         mutex_unlock(&hba->uic_cmd_mutex);
 
         return ret;
@@ -4776,7 +4745,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
         mutex_lock(&hba->dev_cmd.lock);
         for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
                 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
-                                          NOP_OUT_TIMEOUT);
+                                          hba->nop_out_timeout);
 
                 if (!err || err == -ETIMEDOUT)
                         break;
@@ -5931,6 +5900,27 @@ out:
         return err_handling;
 }
 
+/* host lock must be held before calling this func */
+static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
+{
+        return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
+               (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
+}
+
+/* host lock must be held before calling this func */
+static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
+{
+        /* handle fatal errors only when link is not in error state */
+        if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
+                if (hba->force_reset || ufshcd_is_link_broken(hba) ||
+                    ufshcd_is_saved_err_fatal(hba))
+                        hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
+                else
+                        hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
+                queue_work(hba->eh_wq, &hba->eh_work);
+        }
+}
+
 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
 {
         down_write(&hba->clk_scaling_lock);
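
The reinstated ufshcd_schedule_eh_work() classifies the failure under the
host lock and then queues eh_work; queue_work() on an already-pending item
is a no-op, so racing error sources coalesce into a single handler run.
Below is a minimal userspace C analog of that pattern, offered only as a
sketch: pthreads stand in for the kernel workqueue, and every name and
value is illustrative rather than part of the patch.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool work_pending;          /* models queue_work()'s PENDING bit */
    static int eh_state;               /* 0 idle, 1 non-fatal, 2 fatal */

    /* caller must hold 'lock', mirroring the host-lock rule above */
    static void schedule_eh_work(bool fatal)
    {
        if (fatal)
            eh_state = 2;
        else if (eh_state == 0)
            eh_state = 1;
        if (!work_pending) {           /* queueing twice is a no-op */
            work_pending = true;
            pthread_cond_signal(&cond);
        }
    }

    static void *eh_worker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!work_pending)
            pthread_cond_wait(&cond, &lock);
        work_pending = false;
        printf("handling %s error\n", eh_state == 2 ? "fatal" : "non-fatal");
        eh_state = 0;
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, eh_worker, NULL);
        pthread_mutex_lock(&lock);
        schedule_eh_work(false);
        schedule_eh_work(true);        /* coalesces with the first call */
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        return 0;
    }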
@@ -6063,12 +6053,12 @@ static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
 
 /**
  * ufshcd_err_handler - handle UFS errors that require s/w attention
- * @host: SCSI host pointer
+ * @work: pointer to work structure
  */
-static void ufshcd_err_handler(struct Scsi_Host *host)
+static void ufshcd_err_handler(struct work_struct *work)
 {
-        struct ufs_hba *hba = shost_priv(host);
         int retries = MAX_ERR_HANDLER_RETRIES;
+        struct ufs_hba *hba;
         unsigned long flags;
         bool needs_restore;
         bool needs_reset;
@@ -6077,9 +6067,10 @@ static void ufshcd_err_handler(struct Scsi_Host *host)
         int pmc_err;
         int tag;
 
+        hba = container_of(work, struct ufs_hba, eh_work);
+
         down(&hba->host_sem);
         spin_lock_irqsave(hba->host->host_lock, flags);
-        hba->host->host_eh_scheduled = 0;
         if (ufshcd_err_handling_should_stop(hba)) {
                 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
                         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
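
ufshcd_err_handler() now receives the work_struct itself and recovers the
owning ufs_hba with container_of(), i.e. by subtracting the member offset
from the member pointer. A self-contained sketch of that idiom follows;
the struct names are toy stand-ins, not the driver's real definitions.

    #include <stddef.h>
    #include <stdio.h>

    struct work { int pending; };
    struct hba { int id; struct work eh_work; };

    /* same arithmetic as the kernel macro: member pointer -> container */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void err_handler(struct work *w)
    {
        struct hba *h = container_of(w, struct hba, eh_work);

        printf("handling errors for hba %d\n", h->id);
    }

    int main(void)
    {
        struct hba h = { .id = 5 };

        err_handler(&h.eh_work);    /* prints: handling errors for hba 5 */
        return 0;
    }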
@@ -6407,6 +6398,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
                                          "host_regs: ");
                         ufshcd_print_pwr_info(hba);
                 }
+                ufshcd_schedule_eh_work(hba);
                 retval |= IRQ_HANDLED;
         }
         /*
@@ -6418,34 +6410,9 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
         hba->errors = 0;
         hba->uic_error = 0;
         spin_unlock(hba->host->host_lock);
-
-        if (queue_eh_work)
-                ufshcd_schedule_eh(hba);
 
         return retval;
 }
 
-struct ctm_info {
-        struct ufs_hba  *hba;
-        unsigned long   pending;
-        unsigned int    ncpl;
-};
-
-static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
-{
-        struct ctm_info *const ci = priv;
-        struct completion *c;
-
-        WARN_ON_ONCE(reserved);
-        if (test_bit(req->tag, &ci->pending))
-                return true;
-        ci->ncpl++;
-        c = req->end_io_data;
-        if (c)
-                complete(c);
-        return true;
-}
-
 /**
  * ufshcd_tmc_handler - handle task management function completion
  * @hba: per adapter instance
@@ -6456,18 +6423,24 @@ static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
  */
 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
 {
-        unsigned long flags;
-        struct request_queue *q = hba->tmf_queue;
-        struct ctm_info ci = {
-                .hba     = hba,
-        };
+        unsigned long flags, pending, issued;
+        irqreturn_t ret = IRQ_NONE;
+        int tag;
+
+        pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
 
         spin_lock_irqsave(hba->host->host_lock, flags);
-        ci.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
-        blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
+        issued = hba->outstanding_tasks & ~pending;
+        for_each_set_bit(tag, &issued, hba->nutmrs) {
+                struct request *req = hba->tmf_rqs[tag];
+                struct completion *c = req->end_io_data;
+
+                complete(c);
+                ret = IRQ_HANDLED;
+        }
         spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-        return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
+        return ret;
 }
 
 /**
@@ -6590,9 +6563,9 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
         ufshcd_hold(hba, false);
 
         spin_lock_irqsave(host->host_lock, flags);
-        blk_mq_start_request(req);
 
         task_tag = req->tag;
+        hba->tmf_rqs[req->tag] = req;
         treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
 
         memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
@@ -6633,6 +6606,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
         }
 
         spin_lock_irqsave(hba->host->host_lock, flags);
+        hba->tmf_rqs[req->tag] = NULL;
         __clear_bit(task_tag, &hba->outstanding_tasks);
         spin_unlock_irqrestore(hba->host->host_lock, flags);
 
@@ -6912,7 +6886,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
                         err = ufshcd_clear_cmd(hba, pos);
                         if (err)
                                 break;
-                        __ufshcd_transfer_req_compl(hba, pos, /*retry_requests=*/true);
+                        __ufshcd_transfer_req_compl(hba, 1U << pos, false);
                 }
         }
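
The two hunks above reduce completion bookkeeping to bitmask arithmetic:
ufshcd_tmc_handler() treats a task as complete when its doorbell bit has
cleared while the tag is still marked outstanding (issued =
outstanding_tasks & ~pending), and the device-reset path now passes a
one-bit mask (1U << pos) instead of a bare tag index. A tiny runnable
sketch of that arithmetic, with made-up register values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long outstanding = 0x0bUL; /* tags 0, 1 and 3 were issued */
        unsigned long doorbell = 0x08UL;    /* tag 3 is still running */
        unsigned long issued = outstanding & ~doorbell;
        int tag;

        for (tag = 0; tag < 8; tag++)       /* for_each_set_bit() analog */
            if (issued & (1UL << tag))
                printf("tag %d completed\n", tag);
        return 0;
    }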
@@ -7084,17 +7058,15 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
          * will be to send LU reset which, again, is a spec violation.
          * To avoid these unnecessary/illegal steps, first we clean up
          * the lrb taken by this cmd and re-set it in outstanding_reqs,
-         * then queue the error handler and bail.
+         * then queue the eh_work and bail.
          */
         if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
                 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
 
                 spin_lock_irqsave(host->host_lock, flags);
                 hba->force_reset = true;
+                ufshcd_schedule_eh_work(hba);
                 spin_unlock_irqrestore(host->host_lock, flags);
-
-                ufshcd_schedule_eh(hba);
-
                 goto release;
         }
@@ -7237,10 +7209,11 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
         spin_lock_irqsave(hba->host->host_lock, flags);
         hba->force_reset = true;
+        ufshcd_schedule_eh_work(hba);
         dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
         spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-        ufshcd_err_handler(hba->host);
+        flush_work(&hba->eh_work);
 
         spin_lock_irqsave(hba->host->host_lock, flags);
         if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
@@ -8561,6 +8534,8 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
         if (hba->is_powered) {
                 ufshcd_exit_clk_scaling(hba);
                 ufshcd_exit_clk_gating(hba);
+                if (hba->eh_wq)
+                        destroy_workqueue(hba->eh_wq);
                 ufs_debugfs_hba_exit(hba);
                 ufshcd_variant_hba_exit(hba);
                 ufshcd_setup_vreg(hba, false);
@@ -9410,10 +9385,6 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
         return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
 }
 
-static struct scsi_transport_template ufshcd_transport_template = {
-        .eh_strategy_handler = ufshcd_err_handler,
-};
-
 /**
  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
  * @dev: pointer to device handle
@@ -9440,11 +9411,11 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
                 err = -ENOMEM;
                 goto out_error;
         }
-        host->transportt = &ufshcd_transport_template;
         hba = shost_priv(host);
         hba->host = host;
         hba->dev = dev;
         hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
+        hba->nop_out_timeout = NOP_OUT_TIMEOUT;
         INIT_LIST_HEAD(&hba->clk_list_head);
         spin_lock_init(&hba->outstanding_lock);
@@ -9479,6 +9450,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
         int err;
         struct Scsi_Host *host = hba->host;
         struct device *dev = hba->dev;
+        char eh_wq_name[sizeof("ufs_eh_wq_00")];
 
         if (!mmio_base) {
                 dev_err(hba->dev,
@@ -9532,6 +9504,17 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 
         hba->max_pwr_info.is_valid = false;
 
+        /* Initialize work queues */
+        snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
+                 hba->host->host_no);
+        hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
+        if (!hba->eh_wq) {
+                dev_err(hba->dev, "%s: failed to create eh workqueue\n",
+                        __func__);
+                err = -ENOMEM;
+                goto out_disable;
+        }
+        INIT_WORK(&hba->eh_work, ufshcd_err_handler);
         INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
 
         sema_init(&hba->host_sem, 1);
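
The name buffer added here is sized from the format's worst case:
sizeof("ufs_eh_wq_00") reserves room for the prefix, two digits and the
terminating NUL, and snprintf() truncates anything longer rather than
overflowing. A standalone sketch of the sizing trick; the host number is
made up.

    #include <stdio.h>

    int main(void)
    {
        char eh_wq_name[sizeof("ufs_eh_wq_00")];    /* 13 bytes incl. NUL */
        int host_no = 7;            /* stand-in for hba->host->host_no */

        snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d", host_no);
        printf("%s (buffer: %zu bytes)\n", eh_wq_name, sizeof(eh_wq_name));
        return 0;
    }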
@@ -9600,6 +9583,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
                 err = PTR_ERR(hba->tmf_queue);
                 goto free_tmf_tag_set;
         }
+        hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
+                                    sizeof(*hba->tmf_rqs), GFP_KERNEL);
+        if (!hba->tmf_rqs) {
+                err = -ENOMEM;
+                goto free_tmf_queue;
+        }
 
         /* Reset the attached device */
         ufshcd_device_reset(hba);
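
tmf_rqs gives the interrupt path a direct tag-to-request lookup table: one
pointer slot per task-management slot, published under the host lock when
the command is issued and cleared on completion, which is what lets
ufshcd_tmc_handler() above drop blk_mq_tagset_busy_iter(). A userspace
analog using plain calloc(); devm_kcalloc() additionally ties the
allocation's lifetime to the device.

    #include <stdio.h>
    #include <stdlib.h>

    struct request { int tag; };

    int main(void)
    {
        int nutmrs = 8;             /* stand-in for hba->nutmrs */
        struct request **tmf_rqs = calloc(nutmrs, sizeof(*tmf_rqs));
        struct request req = { .tag = 3 };

        if (!tmf_rqs)
            return 1;
        tmf_rqs[req.tag] = &req;    /* publish at issue time */
        printf("tag %d -> %p\n", req.tag, (void *)tmf_rqs[req.tag]);
        tmf_rqs[req.tag] = NULL;    /* clear on completion */
        free(tmf_rqs);
        return 0;
    }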

View File: drivers/scsi/ufs/ufshcd.h

@@ -747,6 +747,8 @@ struct ufs_hba_monitor {
  * @is_powered: flag to check if HBA is powered
  * @shutting_down: flag to check if shutdown has been invoked
  * @host_sem: semaphore used to serialize concurrent contexts
+ * @eh_wq: Workqueue that eh_work works on
+ * @eh_work: Worker to handle UFS errors that require s/w attention
  * @eeh_work: Worker to handle exception events
  * @errors: HBA errors
  * @uic_error: UFS interconnect layer error status
@@ -836,6 +838,7 @@ struct ufs_hba {
 
         struct blk_mq_tag_set tmf_tag_set;
         struct request_queue *tmf_queue;
+        struct request **tmf_rqs;
 
         struct uic_command *active_uic_cmd;
         struct mutex uic_cmd_mutex;
@@ -853,6 +856,8 @@ struct ufs_hba {
         struct semaphore host_sem;
 
         /* Work Queues */
+        struct workqueue_struct *eh_wq;
+        struct work_struct eh_work;
         struct work_struct eeh_work;
 
         /* HBA Errors */
@@ -868,6 +873,7 @@ struct ufs_hba {
         /* Device management request data */
         struct ufs_dev_cmd dev_cmd;
         ktime_t last_dme_cmd_tstamp;
+        int nop_out_timeout;
 
         /* Keeps information of the UFS device connected to this host */
         struct ufs_dev_info dev_info;

View File: drivers/scsi/ufs/ufshpb.c

@@ -333,9 +333,8 @@ ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
 }
 
 static void
-ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshpb_lu *hpb,
-                            struct ufshcd_lrb *lrbp, u32 lpn, __be64 ppn,
-                            u8 transfer_len, int read_id)
+ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+                            __be64 ppn, u8 transfer_len, int read_id)
 {
         unsigned char *cdb = lrbp->cmd->cmnd;
         __be64 ppn_tmp = ppn;
@@ -703,8 +702,7 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
                 }
         }
 
-        ufshpb_set_hpb_read_to_upiu(hba, hpb, lrbp, lpn, ppn, transfer_len,
-                                    read_id);
+        ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len, read_id);
 
         hpb->stats.hit_cnt++;
         return 0;

View File: drivers/scsi/virtio_scsi.c

@@ -300,7 +300,7 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
                 }
                 break;
         default:
-                pr_info("Unsupport virtio scsi event reason %x\n", event->reason);
+                pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
         }
 }
@@ -392,7 +392,7 @@ static void virtscsi_handle_event(struct work_struct *work)
                 virtscsi_handle_param_change(vscsi, event);
                 break;
         default:
-                pr_err("Unsupport virtio scsi event %x\n", event->event);
+                pr_err("Unsupported virtio scsi event %x\n", event->event);
         }
         virtscsi_kick_event(vscsi, event_node);
 }

View File: drivers/target/target_core_configfs.c

@@ -1111,20 +1111,24 @@ static ssize_t alua_support_store(struct config_item *item,
 {
         struct se_dev_attrib *da = to_attrib(item);
         struct se_device *dev = da->da_dev;
-        bool flag;
+        bool flag, oldflag;
         int ret;
 
+        ret = strtobool(page, &flag);
+        if (ret < 0)
+                return ret;
+
+        oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA);
+        if (flag == oldflag)
+                return count;
+
         if (!(dev->transport->transport_flags_changeable &
               TRANSPORT_FLAG_PASSTHROUGH_ALUA)) {
                 pr_err("dev[%p]: Unable to change SE Device alua_support:"
                         " alua_support has fixed value\n", dev);
-                return -EINVAL;
+                return -ENOSYS;
         }
 
-        ret = strtobool(page, &flag);
-        if (ret < 0)
-                return ret;
-
         if (flag)
                 dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA;
         else
@@ -1146,20 +1150,24 @@ static ssize_t pgr_support_store(struct config_item *item,
 {
         struct se_dev_attrib *da = to_attrib(item);
         struct se_device *dev = da->da_dev;
-        bool flag;
+        bool flag, oldflag;
         int ret;
 
+        ret = strtobool(page, &flag);
+        if (ret < 0)
+                return ret;
+
+        oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR);
+        if (flag == oldflag)
+                return count;
+
         if (!(dev->transport->transport_flags_changeable &
               TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
                 pr_err("dev[%p]: Unable to change SE Device pgr_support:"
                         " pgr_support has fixed value\n", dev);
-                return -EINVAL;
+                return -ENOSYS;
         }
 
-        ret = strtobool(page, &flag);
-        if (ret < 0)
-                return ret;
-
         if (flag)
                 dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
         else
View File: drivers/target/target_core_pr.c

@@ -269,7 +269,7 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
         spin_lock(&dev->dev_reservation_lock);
         if (dev->reservation_holder &&
             dev->reservation_holder->se_node_acl != sess->se_node_acl) {
-                pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
+                pr_err("SCSI-2 RESERVATION CONFLICT for %s fabric\n",
                         tpg->se_tpg_tfo->fabric_name);
                 pr_err("Original reserver LUN: %llu %s\n",
                         cmd->se_lun->unpacked_lun,

View File: include/scsi/scsi_device.h

@@ -146,7 +146,6 @@ struct scsi_device {
         struct scsi_vpd __rcu *vpd_pg83;
         struct scsi_vpd __rcu *vpd_pg80;
         struct scsi_vpd __rcu *vpd_pg89;
-        unsigned char current_tag;      /* current tag */
         struct scsi_target *sdev_target;
 
         blist_flags_t sdev_bflags; /* black/white flags as also found in