// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * iSCSI lib functions
 *
 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2004 - 2006 Mike Christie
 * Copyright (C) 2004 - 2005 Dmitry Yusupov
 * Copyright (C) 2004 - 2005 Alex Aizman
 * maintained by open-iscsi@googlegroups.com
 */
#include <linux/types.h>
#include <linux/kfifo.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <net/tcp.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/iscsi_proto.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/libiscsi.h>
#include <trace/events/iscsi.h>
static int iscsi_dbg_lib_conn;
module_param_named(debug_libiscsi_conn, iscsi_dbg_lib_conn, int,
		   S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_libiscsi_conn,
		 "Turn on debugging for connections in libiscsi module. "
		 "Set to 1 to turn on, and zero to turn off. Default is off.");

static int iscsi_dbg_lib_session;
module_param_named(debug_libiscsi_session, iscsi_dbg_lib_session, int,
		   S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_libiscsi_session,
		 "Turn on debugging for sessions in libiscsi module. "
		 "Set to 1 to turn on, and zero to turn off. Default is off.");

static int iscsi_dbg_lib_eh;
module_param_named(debug_libiscsi_eh, iscsi_dbg_lib_eh, int,
		   S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_libiscsi_eh,
		 "Turn on debugging for error handling in libiscsi module. "
		 "Set to 1 to turn on, and zero to turn off. Default is off.");

#define ISCSI_DBG_CONN(_conn, dbg_fmt, arg...)			\
	do {							\
		if (iscsi_dbg_lib_conn)				\
			iscsi_conn_printk(KERN_INFO, _conn,	\
					     "%s " dbg_fmt,	\
					     __func__, ##arg);	\
		iscsi_dbg_trace(trace_iscsi_dbg_conn,		\
				&(_conn)->cls_conn->dev,	\
				"%s " dbg_fmt, __func__, ##arg);\
	} while (0);

#define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...)		\
	do {							\
		if (iscsi_dbg_lib_session)			\
			iscsi_session_printk(KERN_INFO, _session, \
					     "%s " dbg_fmt,	\
					     __func__, ##arg);	\
		iscsi_dbg_trace(trace_iscsi_dbg_session,	\
				&(_session)->cls_session->dev,	\
				"%s " dbg_fmt, __func__, ##arg); \
	} while (0);

#define ISCSI_DBG_EH(_session, dbg_fmt, arg...)			\
	do {							\
		if (iscsi_dbg_lib_eh)				\
			iscsi_session_printk(KERN_INFO, _session, \
					     "%s " dbg_fmt,	\
					     __func__, ##arg);	\
		iscsi_dbg_trace(trace_iscsi_dbg_eh,		\
				&(_session)->cls_session->dev,	\
				"%s " dbg_fmt, __func__, ##arg); \
	} while (0);

#define ISCSI_CMD_COMPL_WAIT 5
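
/*
 * The two helpers below only schedule the connection's xmit/recv work on
 * the per-host workqueue; they do not transmit anything directly.
 * Scheduling of the recv work is skipped while ISCSI_CONN_FLAG_SUSPEND_RX
 * is set on the connection.
 */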
inline void iscsi_conn_queue_xmit(struct iscsi_conn *conn)
{
	struct Scsi_Host *shost = conn->session->host;
	struct iscsi_host *ihost = shost_priv(shost);

	if (ihost->workq)
		queue_work(ihost->workq, &conn->xmitwork);
}
EXPORT_SYMBOL_GPL(iscsi_conn_queue_xmit);

inline void iscsi_conn_queue_recv(struct iscsi_conn *conn)
{
	struct Scsi_Host *shost = conn->session->host;
	struct iscsi_host *ihost = shost_priv(shost);

	if (ihost->workq && !test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))
		queue_work(ihost->workq, &conn->recvwork);
}
EXPORT_SYMBOL_GPL(iscsi_conn_queue_recv);
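
/*
 * ExpCmdSN/MaxCmdSN are 32-bit serial numbers, so the comparisons below use
 * iscsi_sna_lt() (serial number arithmetic) rather than a plain "<" so that
 * the command window is still tracked correctly across wrap-around.
 */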
static void __iscsi_update_cmdsn(struct iscsi_session *session,
				 uint32_t exp_cmdsn, uint32_t max_cmdsn)
{
	/*
	 * standard specifies this check for when to update expected and
	 * max sequence numbers
	 */
	if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1))
		return;

	if (exp_cmdsn != session->exp_cmdsn &&
	    !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn))
		session->exp_cmdsn = exp_cmdsn;

	if (max_cmdsn != session->max_cmdsn &&
	    !iscsi_sna_lt(max_cmdsn, session->max_cmdsn))
		session->max_cmdsn = max_cmdsn;
}

void iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
{
	__iscsi_update_cmdsn(session, be32_to_cpu(hdr->exp_cmdsn),
			     be32_to_cpu(hdr->max_cmdsn));
}
EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);

/**
 * iscsi_prep_data_out_pdu - initialize Data-Out
 * @task: scsi command task
 * @r2t: R2T info
 * @hdr: iscsi data-out pdu header
 *
 * Notes:
 *	Initializes a Data-Out PDU within this R2T sequence and finds the
 *	proper data_offset within this SCSI command.
 *
 *	This function is called with the connection lock taken.
 **/
void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t,
			   struct iscsi_data *hdr)
{
	struct iscsi_conn *conn = task->conn;
	unsigned int left = r2t->data_length - r2t->sent;

	task->hdr_len = sizeof(struct iscsi_data);

	memset(hdr, 0, sizeof(struct iscsi_data));
	hdr->ttt = r2t->ttt;
	hdr->datasn = cpu_to_be32(r2t->datasn);
	r2t->datasn++;
	hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
	hdr->lun = task->lun;
	hdr->itt = task->hdr_itt;
	hdr->exp_statsn = r2t->exp_statsn;
	hdr->offset = cpu_to_be32(r2t->data_offset + r2t->sent);
	if (left > conn->max_xmit_dlength) {
		hton24(hdr->dlength, conn->max_xmit_dlength);
		r2t->data_count = conn->max_xmit_dlength;
		hdr->flags = 0;
	} else {
		hton24(hdr->dlength, left);
		r2t->data_count = left;
		hdr->flags = ISCSI_FLAG_CMD_FINAL;
	}
	conn->dataout_pdus_cnt++;
}
EXPORT_SYMBOL_GPL(iscsi_prep_data_out_pdu);
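
/*
 * iscsi_add_hdr() below reserves 'len' extra bytes of AHS space in the
 * task's header buffer. It fails if the header would grow past hdr_max,
 * and callers must pad each AHS to a 4-byte (ISCSI_PAD_LEN) boundary.
 */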
static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
{
	unsigned exp_len = task->hdr_len + len;

	if (exp_len > task->hdr_max) {
		WARN_ON(1);
		return -EINVAL;
	}

	WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
	task->hdr_len = exp_len;
	return 0;
}

/*
 * make an extended cdb AHS
 */
static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
{
	struct scsi_cmnd *cmd = task->sc;
	unsigned rlen, pad_len;
	unsigned short ahslength;
	struct iscsi_ecdb_ahdr *ecdb_ahdr;
	int rc;

	ecdb_ahdr = iscsi_next_hdr(task);
	rlen = cmd->cmd_len - ISCSI_CDB_SIZE;

	BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
	ahslength = rlen + sizeof(ecdb_ahdr->reserved);

	pad_len = iscsi_padding(rlen);

	rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
			   sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
	if (rc)
		return rc;

	if (pad_len)
		memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);

	ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
	ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
	ecdb_ahdr->reserved = 0;
	memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);

	ISCSI_DBG_SESSION(task->conn->session,
			  "iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
			  "rlen %d pad_len %d ahs_length %d iscsi_headers_size "
			  "%u\n", cmd->cmd_len, rlen, pad_len, ahslength,
			  task->hdr_len);
	return 0;
}

/**
 * iscsi_check_tmf_restrictions - check if a task is affected by TMF
 * @task: iscsi task
 * @opcode: opcode to check for
 *
 * While a TMF is outstanding each task has to be checked to see whether
 * it is affected. All unrelated I/O can be passed through, but I/O for
 * the affected LUN should be restricted.
 * If 'fast_abort' is set we won't be sending any I/O to the
 * affected LUN.
 * Otherwise the target is waiting for all TTTs to be completed,
 * so we have to send all outstanding Data-Out PDUs to the target.
 */
static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
{
	struct iscsi_session *session = task->conn->session;
	struct iscsi_tm *tmf = &session->tmhdr;
	u64 hdr_lun;

	if (session->tmf_state == TMF_INITIAL)
		return 0;

	if ((tmf->opcode & ISCSI_OPCODE_MASK) != ISCSI_OP_SCSI_TMFUNC)
		return 0;

	switch (ISCSI_TM_FUNC_VALUE(tmf)) {
	case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
		/*
		 * Allow PDUs for unrelated LUNs
		 */
		hdr_lun = scsilun_to_int(&tmf->lun);
		if (hdr_lun != task->sc->device->lun)
			return 0;
		fallthrough;
	case ISCSI_TM_FUNC_TARGET_WARM_RESET:
		/*
		 * Fail all SCSI cmd PDUs
		 */
		if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
			iscsi_session_printk(KERN_INFO, session,
					     "task [op %x itt 0x%x/0x%x] rejected.\n",
					     opcode, task->itt, task->hdr_itt);
			return -EACCES;
		}
		/*
		 * And also all data-out PDUs in response to R2T
		 * if fast_abort is set.
		 */
		if (session->fast_abort) {
			iscsi_session_printk(KERN_INFO, session,
					     "task [op %x itt 0x%x/0x%x] fast abort.\n",
					     opcode, task->itt, task->hdr_itt);
			return -EACCES;
		}
		break;
	case ISCSI_TM_FUNC_ABORT_TASK:
		/*
		 * the caller has already checked if the task
		 * they want to abort was in the pending queue so if
		 * we are here the cmd pdu has gone out already, and
		 * we will only hit this for data-outs
		 */
		if (opcode == ISCSI_OP_SCSI_DATA_OUT &&
		    task->hdr_itt == tmf->rtt) {
			ISCSI_DBG_SESSION(session,
					  "Preventing task %x/%x from sending "
					  "data-out due to abort task in "
					  "progress\n", task->itt,
					  task->hdr_itt);
			return -EACCES;
		}
		break;
	}

	return 0;
}

/**
 * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
 * @task: iscsi task
 *
 * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
 * fields like dlength or final based on how much data it sends.
 */
static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *session = conn->session;
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_scsi_req *hdr;
	unsigned hdrlength, cmd_len, transfer_length;
	itt_t itt;
	int rc;

	rc = iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_CMD);
	if (rc)
		return rc;

	if (conn->session->tt->alloc_pdu) {
		rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
		if (rc)
			return rc;
	}
	hdr = (struct iscsi_scsi_req *)task->hdr;
	itt = hdr->itt;
	memset(hdr, 0, sizeof(*hdr));

	if (session->tt->parse_pdu_itt)
		hdr->itt = task->hdr_itt = itt;
	else
		hdr->itt = task->hdr_itt = build_itt(task->itt,
						     task->conn->session->age);
	task->hdr_len = 0;
	rc = iscsi_add_hdr(task, sizeof(*hdr));
	if (rc)
		return rc;
	hdr->opcode = ISCSI_OP_SCSI_CMD;
	hdr->flags = ISCSI_ATTR_SIMPLE;
	int_to_scsilun(sc->device->lun, &hdr->lun);
	task->lun = hdr->lun;
	hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
	cmd_len = sc->cmd_len;
	if (cmd_len < ISCSI_CDB_SIZE)
		memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
	else if (cmd_len > ISCSI_CDB_SIZE) {
		rc = iscsi_prep_ecdb_ahs(task);
		if (rc)
			return rc;
		cmd_len = ISCSI_CDB_SIZE;
	}
	memcpy(hdr->cdb, sc->cmnd, cmd_len);

	task->imm_count = 0;
	if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
		task->protected = true;

	transfer_length = scsi_transfer_length(sc);
	hdr->data_length = cpu_to_be32(transfer_length);
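	/*
	 * For writes, decide how much goes out as immediate data in this PDU
	 * (imm_count) and how much may follow as unsolicited Data-Out
	 * (task->unsol_r2t), both bounded by the negotiated FirstBurstLength
	 * and the connection's max_xmit_dlength.
	 */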
	if (sc->sc_data_direction == DMA_TO_DEVICE) {
		struct iscsi_r2t_info *r2t = &task->unsol_r2t;

		hdr->flags |= ISCSI_FLAG_CMD_WRITE;
		/*
		 * Write counters:
		 *
		 *	imm_count	bytes to be sent right after
		 *			SCSI PDU Header
		 *
		 *	unsol_count	bytes (as Data-Out) to be sent
		 *			without R2T ack right after
		 *			immediate data
		 *
		 *	r2t data_length	bytes to be sent via R2T acks
		 *
		 *	pad_count	bytes to be sent as zero-padding
		 */
		memset(r2t, 0, sizeof(*r2t));

		if (session->imm_data_en) {
			if (transfer_length >= session->first_burst)
				task->imm_count = min(session->first_burst,
						      conn->max_xmit_dlength);
			else
				task->imm_count = min(transfer_length,
						      conn->max_xmit_dlength);
			hton24(hdr->dlength, task->imm_count);
		} else
			zero_data(hdr->dlength);

		if (!session->initial_r2t_en) {
			r2t->data_length = min(session->first_burst,
					       transfer_length) -
					       task->imm_count;
			r2t->data_offset = task->imm_count;
			r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
			r2t->exp_statsn = cpu_to_be32(conn->exp_statsn);
		}

		if (!task->unsol_r2t.data_length)
			/* No unsolicited Data-Outs */
			hdr->flags |= ISCSI_FLAG_CMD_FINAL;
	} else {
		hdr->flags |= ISCSI_FLAG_CMD_FINAL;
		zero_data(hdr->dlength);

		if (sc->sc_data_direction == DMA_FROM_DEVICE)
			hdr->flags |= ISCSI_FLAG_CMD_READ;
	}

	/* calculate size of additional header segments (AHSs) */
	hdrlength = task->hdr_len - sizeof(*hdr);

	WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
	hdrlength /= ISCSI_PAD_LEN;

	WARN_ON(hdrlength >= 256);
	hdr->hlength = hdrlength & 0xFF;
	hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);

	if (session->tt->init_task && session->tt->init_task(task))
		return -EIO;

	task->state = ISCSI_TASK_RUNNING;
	session->cmdsn++;

	conn->scsicmd_pdus_cnt++;
	ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x "
			  "itt 0x%x len %d cmdsn %d win %d]\n",
			  sc->sc_data_direction == DMA_TO_DEVICE ?
			  "write" : "read", conn->id, sc, sc->cmnd[0],
			  task->itt, transfer_length,
			  session->cmdsn,
			  session->max_cmdsn - session->exp_cmdsn + 1);
	return 0;
}

/**
 * iscsi_free_task - free a task
 * @task: iscsi cmd task
 *
 * Must be called with the session back_lock held.
 * This function returns the scsi command to scsi-ml or cleans
 * up mgmt tasks, then returns the task to the pool.
 */
static void iscsi_free_task(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *session = conn->session;
	struct scsi_cmnd *sc = task->sc;
	int oldstate = task->state;

	ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n",
			  task->itt, task->state, task->sc);

	session->tt->cleanup_task(task);
	task->state = ISCSI_TASK_FREE;
	task->sc = NULL;
	/*
	 * login task is preallocated so do not free
	 */
	if (conn->login_task == task)
		return;

	kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*));

	if (sc) {
		/* SCSI eh reuses commands to verify us */
		iscsi_cmd(sc)->task = NULL;
		/*
		 * queue command may call this to free the task, so
		 * it will decide how to return sc to scsi-ml.
		 */
		if (oldstate != ISCSI_TASK_REQUEUE_SCSIQ)
			scsi_done(sc);
	}
}
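
/*
 * Task reference counting: a task holds one reference from allocation until
 * it is completed, and iscsi_get_task() only succeeds while that refcount is
 * still non-zero. The final put must run under the session back_lock (or
 * take it, as iscsi_put_task() does) because it may free the task back to
 * the command pool.
 */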
bool iscsi_get_task(struct iscsi_task *task)
{
	return refcount_inc_not_zero(&task->refcount);
}
EXPORT_SYMBOL_GPL(iscsi_get_task);

/**
 * __iscsi_put_task - drop the refcount on a task
 * @task: iscsi_task to drop the refcount on
 *
 * The back_lock must be held when calling in case it frees the task.
 */
void __iscsi_put_task(struct iscsi_task *task)
{
	if (refcount_dec_and_test(&task->refcount))
		iscsi_free_task(task);
}
EXPORT_SYMBOL_GPL(__iscsi_put_task);

void iscsi_put_task(struct iscsi_task *task)
{
	struct iscsi_session *session = task->conn->session;

	if (refcount_dec_and_test(&task->refcount)) {
		spin_lock_bh(&session->back_lock);
		iscsi_free_task(task);
		spin_unlock_bh(&session->back_lock);
	}
}
EXPORT_SYMBOL_GPL(iscsi_put_task);

/**
 * iscsi_complete_task - finish a task
 * @task: iscsi cmd task
 * @state: state to complete task with
 *
 * Must be called with session back_lock.
 */
static void iscsi_complete_task(struct iscsi_task *task, int state)
{
	struct iscsi_conn *conn = task->conn;

	ISCSI_DBG_SESSION(conn->session,
			  "complete task itt 0x%x state %d sc %p\n",
			  task->itt, task->state, task->sc);
	if (task->state == ISCSI_TASK_COMPLETED ||
	    task->state == ISCSI_TASK_ABRT_TMF ||
	    task->state == ISCSI_TASK_ABRT_SESS_RECOV ||
	    task->state == ISCSI_TASK_REQUEUE_SCSIQ)
		return;
	WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
	task->state = state;

	if (READ_ONCE(conn->ping_task) == task)
		WRITE_ONCE(conn->ping_task, NULL);

	/* release get from queueing */
	__iscsi_put_task(task);
}

/**
 * iscsi_complete_scsi_task - finish scsi task normally
 * @task: iscsi task for scsi cmd
 * @exp_cmdsn: expected cmd sn in cpu format
 * @max_cmdsn: max cmd sn in cpu format
 *
 * This is used when drivers do not need or cannot perform
 * lower level pdu processing.
 *
 * Called with the session back_lock held.
 */
void iscsi_complete_scsi_task(struct iscsi_task *task,
			      uint32_t exp_cmdsn, uint32_t max_cmdsn)
{
	struct iscsi_conn *conn = task->conn;

	ISCSI_DBG_SESSION(conn->session, "[itt 0x%x]\n", task->itt);

	conn->last_recv = jiffies;
	__iscsi_update_cmdsn(conn->session, exp_cmdsn, max_cmdsn);
	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
}
EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task);
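
/*
 * cleanup_queued_task() drops the extra references still held on a task from
 * it being queued for transmit (conn->task, the running/requeue list, or a
 * running abort), and reports whether the task had already completed before
 * the cleanup ran.
 */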
/*
 * Must be called with back and frwd lock
 */
static bool cleanup_queued_task(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	bool early_complete = false;

	/*
	 * We might have raced where we handled a R2T early and got a response
	 * but have not yet taken the task off the requeue list, then a TMF or
	 * recovery happened and so we can still see it here.
	 */
	if (task->state == ISCSI_TASK_COMPLETED)
		early_complete = true;

	if (!list_empty(&task->running)) {
		list_del_init(&task->running);
		/*
		 * If it's on a list but still running this could be cleanup
		 * from a TMF or session recovery.
		 */
		if (task->state == ISCSI_TASK_RUNNING ||
		    task->state == ISCSI_TASK_COMPLETED)
			__iscsi_put_task(task);
	}

	if (conn->session->running_aborted_task == task) {
		conn->session->running_aborted_task = NULL;
		__iscsi_put_task(task);
	}

	if (conn->task == task) {
		conn->task = NULL;
		__iscsi_put_task(task);
	}

	return early_complete;
}

/*
 * session back and frwd lock must be held and if not called for a task that
 * is still pending or from the xmit thread, then xmit thread must be suspended
 */
static void __fail_scsi_task(struct iscsi_task *task, int err)
{
	struct iscsi_conn *conn = task->conn;
	struct scsi_cmnd *sc;
	int state;

	if (cleanup_queued_task(task))
		return;

	if (task->state == ISCSI_TASK_PENDING) {
		/*
		 * cmd never made it to the xmit thread, so we should not count
		 * the cmd in the sequencing
		 */
		conn->session->queued_cmdsn--;
		/* it was never sent so just complete like normal */
		state = ISCSI_TASK_COMPLETED;
	} else if (err == DID_TRANSPORT_DISRUPTED)
		state = ISCSI_TASK_ABRT_SESS_RECOV;
	else
		state = ISCSI_TASK_ABRT_TMF;

	sc = task->sc;
	sc->result = err << 16;
	scsi_set_resid(sc, scsi_bufflen(sc));
	iscsi_complete_task(task, state);
}

static void fail_scsi_task(struct iscsi_task *task, int err)
{
	struct iscsi_session *session = task->conn->session;

	spin_lock_bh(&session->back_lock);
	__fail_scsi_task(task, err);
	spin_unlock_bh(&session->back_lock);
}

static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
				struct iscsi_task *task)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_hdr *hdr = task->hdr;
	struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
	uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;

	if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
		return -ENOTCONN;

	if (opcode != ISCSI_OP_LOGIN && opcode != ISCSI_OP_TEXT)
		nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
	/*
	 * pre-format CmdSN for outgoing PDU.
	 */
	nop->cmdsn = cpu_to_be32(session->cmdsn);
	if (hdr->itt != RESERVED_ITT) {
		/*
		 * TODO: We always use immediate for normal session pdus.
		 * If we start to send tmfs or nops as non-immediate then
		 * we should start checking the cmdsn numbers for mgmt tasks.
		 *
		 * During discovery sessions iscsid sends TEXT as non immediate,
		 * but we always only send one PDU at a time.
		 */
		if (conn->c_stage == ISCSI_CONN_STARTED &&
		    !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
			session->queued_cmdsn++;
			session->cmdsn++;
		}
	}

	if (session->tt->init_task && session->tt->init_task(task))
		return -EIO;

	if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
		session->state = ISCSI_STATE_LOGGING_OUT;

	task->state = ISCSI_TASK_RUNNING;
	ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x "
			  "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK,
			  hdr->itt, task->data_count);
	return 0;
}

/**
 * iscsi_alloc_mgmt_task - allocate and setup a mgmt task.
 * @conn: iscsi conn that the task will be sent on.
 * @hdr: iscsi pdu that will be sent.
 * @data: buffer for data segment if needed.
 * @data_size: length of data in bytes.
 */
static struct iscsi_task *
iscsi_alloc_mgmt_task(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
		      char *data, uint32_t data_size)
{
	struct iscsi_session *session = conn->session;
	uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;
	struct iscsi_task *task;
	itt_t itt;

	if (session->state == ISCSI_STATE_TERMINATE ||
	    !test_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags))
		return NULL;

	if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
		/*
		 * Login and Text are sent serially, in
		 * request-followed-by-response sequence.
		 * Same task can be used. Same ITT must be used.
		 * Note that login_task is preallocated at conn_create().
		 */
		if (conn->login_task->state != ISCSI_TASK_FREE) {
			iscsi_conn_printk(KERN_ERR, conn, "Login/Text in "
					  "progress. Cannot start new task.\n");
			return NULL;
		}

		if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) {
			iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN);
			return NULL;
		}

		task = conn->login_task;
	} else {
		if (session->state != ISCSI_STATE_LOGGED_IN)
			return NULL;

		if (data_size != 0) {
			iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode);
			return NULL;
		}

		BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
		BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);

		if (!kfifo_out(&session->cmdpool.queue,
			       (void*)&task, sizeof(void*)))
			return NULL;
	}
	/*
	 * released in complete pdu for task we expect a response for, and
	 * released by the lld when it has transmitted the task for
	 * pdus we do not expect a response for.
	 */
	refcount_set(&task->refcount, 1);
	task->conn = conn;
	task->sc = NULL;
	INIT_LIST_HEAD(&task->running);
	task->state = ISCSI_TASK_PENDING;

	if (data_size) {
		memcpy(task->data, data, data_size);
		task->data_count = data_size;
	} else
		task->data_count = 0;

	if (conn->session->tt->alloc_pdu) {
		if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
			iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
					 "pdu for mgmt task.\n");
			goto free_task;
		}
	}

	itt = task->hdr->itt;
	task->hdr_len = sizeof(struct iscsi_hdr);
	memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));

	if (hdr->itt != RESERVED_ITT) {
		if (session->tt->parse_pdu_itt)
			task->hdr->itt = itt;
		else
			task->hdr->itt = build_itt(task->itt,
						   task->conn->session->age);
	}

	return task;

free_task:
	iscsi_put_task(task);
	return NULL;
}

/**
 * iscsi_send_mgmt_task - Send task created with iscsi_alloc_mgmt_task.
 * @task: iscsi task to send.
 *
 * On failure this returns a non-zero error code, and the driver must free
 * the task with iscsi_put_task().
 */
static int iscsi_send_mgmt_task(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *session = conn->session;
	struct iscsi_host *ihost = shost_priv(conn->session->host);
	int rc = 0;

	if (!ihost->workq) {
		rc = iscsi_prep_mgmt_task(conn, task);
		if (rc)
			return rc;

		rc = session->tt->xmit_task(task);
		if (rc)
			return rc;
	} else {
		list_add_tail(&task->running, &conn->mgmtqueue);
		iscsi_conn_queue_xmit(conn);
	}

	return 0;
}
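
/*
 * __iscsi_conn_send_pdu() allocates a mgmt task for the PDU and hands it to
 * iscsi_send_mgmt_task(); on failure the task's reference is dropped here.
 * iscsi_conn_send_pdu() is the exported wrapper that runs this under the
 * session frwd_lock.
 */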
static int __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
				 char *data, uint32_t data_size)
{
	struct iscsi_task *task;
	int rc;

	task = iscsi_alloc_mgmt_task(conn, hdr, data, data_size);
	if (!task)
		return -ENOMEM;

	rc = iscsi_send_mgmt_task(task);
	if (rc)
		iscsi_put_task(task);
	return rc;
}

int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
			char *data, uint32_t data_size)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	int err = 0;

	spin_lock_bh(&session->frwd_lock);
	if (__iscsi_conn_send_pdu(conn, hdr, data, data_size))
		err = -EPERM;
	spin_unlock_bh(&session->frwd_lock);
	return err;
}
EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);

/**
 * iscsi_scsi_cmd_rsp - SCSI Command Response processing
 * @conn: iscsi connection
 * @hdr: iscsi header
 * @task: scsi command task
 * @data: cmd data buffer
 * @datalen: len of buffer
 *
 * iscsi_scsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
 * then completes the command and task. Called under back_lock.
 **/
static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
			       struct iscsi_task *task, char *data,
			       int datalen)
{
	struct iscsi_scsi_rsp *rhdr = (struct iscsi_scsi_rsp *)hdr;
	struct iscsi_session *session = conn->session;
	struct scsi_cmnd *sc = task->sc;

	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
	conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;

	sc->result = (DID_OK << 16) | rhdr->cmd_status;

	if (task->protected) {
		sector_t sector;
		u8 ascq;

		/*
		 * Transports that didn't implement check_protection
		 * callback but still published T10-PI support to scsi-mid
		 * deserve this BUG_ON.
		 */
		BUG_ON(!session->tt->check_protection);

		ascq = session->tt->check_protection(task, &sector);
		if (ascq) {
			scsi_build_sense(sc, 1, ILLEGAL_REQUEST, 0x10, ascq);
			scsi_set_sense_information(sc->sense_buffer,
						   SCSI_SENSE_BUFFERSIZE,
						   sector);
			goto out;
		}
	}

	if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) {
		sc->result = DID_ERROR << 16;
		goto out;
	}

	if (rhdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
		uint16_t senselen;

		if (datalen < 2) {
invalid_datalen:
			iscsi_conn_printk(KERN_ERR, conn,
					  "Got CHECK_CONDITION but invalid data "
					  "buffer size of %d\n", datalen);
			sc->result = DID_BAD_TARGET << 16;
			goto out;
		}

		senselen = get_unaligned_be16(data);
		if (datalen < senselen)
			goto invalid_datalen;

		memcpy(sc->sense_buffer, data + 2,
		       min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
		ISCSI_DBG_SESSION(session, "copied %d bytes of sense\n",
				  min_t(uint16_t, senselen,
					SCSI_SENSE_BUFFERSIZE));
	}

	if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
			   ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
		sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
	}

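	/*
	 * For underflow/overflow the target reports a residual byte count.
	 * Only accept it when it is positive and, for underflow, no larger
	 * than the buffer we actually supplied; otherwise treat the
	 * response as coming from a bad target.
	 */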
	if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
			   ISCSI_FLAG_CMD_OVERFLOW)) {
		int res_count = be32_to_cpu(rhdr->residual_count);

		if (res_count > 0 &&
		    (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
		     res_count <= scsi_bufflen(sc)))
			/* write side for bidi or uni-io set_resid */
			scsi_set_resid(sc, res_count);
		else
			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
	}
out:
	ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n",
			  sc, sc->result, task->itt);
	conn->scsirsp_pdus_cnt++;
	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
}

/**
 * iscsi_data_in_rsp - SCSI Data-In Response processing
 * @conn: iscsi connection
 * @hdr: iscsi pdu
 * @task: scsi command task
 *
 * iscsi_data_in_rsp sets up the scsi_cmnd fields based on the data received
 * and then completes the command and task. Called under back_lock.
 **/
static void
iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
		  struct iscsi_task *task)
{
	struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)hdr;
	struct scsi_cmnd *sc = task->sc;

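	/*
	 * Only a Data-In PDU that also carries SCSI status (the S bit set)
	 * completes the command here; intermediate Data-In PDUs are handled
	 * by the transport and ignored at this level.
	 */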
	if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
		return;

	iscsi_update_cmdsn(conn->session, (struct iscsi_nopin *)hdr);
	sc->result = (DID_OK << 16) | rhdr->cmd_status;
	conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
	if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
			   ISCSI_FLAG_DATA_OVERFLOW)) {
		int res_count = be32_to_cpu(rhdr->residual_count);

		if (res_count > 0 &&
		    (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
		     res_count <= sc->sdb.length))
			scsi_set_resid(sc, res_count);
		else
			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
	}

	ISCSI_DBG_SESSION(conn->session, "data in with status done "
			  "[sc %p res %d itt 0x%x]\n",
			  sc, sc->result, task->itt);
	conn->scsirsp_pdus_cnt++;
	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
}

static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
{
	struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
	struct iscsi_session *session = conn->session;

	conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
	conn->tmfrsp_pdus_cnt++;

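	/*
	 * Only act on the response if a TMF is actually outstanding; record
	 * the outcome in session->tmf_state and wake the error handler
	 * sleeping on session->ehwait.
	 */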
	if (session->tmf_state != TMF_QUEUED)
		return;

	if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
		session->tmf_state = TMF_SUCCESS;
	else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
		session->tmf_state = TMF_NOT_FOUND;
	else
		session->tmf_state = TMF_FAILED;
	wake_up(&session->ehwait);
}

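/*
 * iscsi_send_nopout - transmit a NOP-Out PDU. A NULL @rhdr means this is a
 * ping we generate ourselves (tracked via conn->ping_task so only one is
 * outstanding at a time); a non-NULL @rhdr is the reply to a target NOP-In
 * and echoes the target's TTT and LUN.
 */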
static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
{
	struct iscsi_nopout hdr;
	struct iscsi_task *task;

	if (!rhdr) {
		if (READ_ONCE(conn->ping_task))
			return -EINVAL;
	}

	memset(&hdr, 0, sizeof(struct iscsi_nopout));
	hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
	hdr.flags = ISCSI_FLAG_CMD_FINAL;

	if (rhdr) {
		hdr.lun = rhdr->lun;
		hdr.ttt = rhdr->ttt;
		hdr.itt = RESERVED_ITT;
	} else
		hdr.ttt = RESERVED_ITT;

	task = iscsi_alloc_mgmt_task(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
	if (!task)
		return -ENOMEM;

	if (!rhdr)
		WRITE_ONCE(conn->ping_task, task);

	if (iscsi_send_mgmt_task(task)) {
		if (!rhdr)
			WRITE_ONCE(conn->ping_task, NULL);
		iscsi_put_task(task);

		iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
		return -EIO;
	} else if (!rhdr) {
		/* only track our nops */
		conn->last_ping = jiffies;
	}

	return 0;
}

/**
 * iscsi_nop_out_rsp - SCSI NOP Response processing
 * @task: scsi command task
 * @nop: the nop structure
 * @data: where to put the data
 * @datalen: length of data
 *
 * iscsi_nop_out_rsp handles a nop response from us or
 * from user space. Called under back_lock.
 **/
static int iscsi_nop_out_rsp(struct iscsi_task *task,
			     struct iscsi_nopin *nop, char *data, int datalen)
{
	struct iscsi_conn *conn = task->conn;
	int rc = 0;

	if (READ_ONCE(conn->ping_task) != task) {
		/*
		 * If this is not in response to one of our
		 * nops then it must be from userspace.
		 */
		if (iscsi_recv_pdu(conn->cls_conn, (struct iscsi_hdr *)nop,
				   data, datalen))
			rc = ISCSI_ERR_CONN_FAILED;
	} else
		mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
	return rc;
}

static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
			       char *data, int datalen)
{
	struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
	struct iscsi_hdr rejected_pdu;
	int opcode, rc = 0;

	conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;

	if (ntoh24(reject->dlength) > datalen ||
	    ntoh24(reject->dlength) < sizeof(struct iscsi_hdr)) {
		iscsi_conn_printk(KERN_ERR, conn, "Cannot handle rejected "
				  "pdu. Invalid data length (pdu dlength "
				  "%u, datalen %d\n", ntoh24(reject->dlength),
				  datalen);
		return ISCSI_ERR_PROTO;
	}

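	/*
	 * The data segment of a Reject PDU carries the header of the PDU
	 * that was rejected; pull it out so we can tell what failed.
	 */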
	memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
	opcode = rejected_pdu.opcode & ISCSI_OPCODE_MASK;

	switch (reject->reason) {
	case ISCSI_REASON_DATA_DIGEST_ERROR:
		iscsi_conn_printk(KERN_ERR, conn,
				  "pdu (op 0x%x itt 0x%x) rejected "
				  "due to DataDigest error.\n",
				  opcode, rejected_pdu.itt);
		break;
	case ISCSI_REASON_IMM_CMD_REJECT:
		iscsi_conn_printk(KERN_ERR, conn,
				  "pdu (op 0x%x itt 0x%x) rejected. Too many "
				  "immediate commands.\n",
				  opcode, rejected_pdu.itt);
		/*
		 * We only send one TMF at a time so if the target could not
		 * handle it, then it should get fixed (RFC mandates that
		 * a target can handle one immediate TMF per conn).
		 *
		 * For nop-outs, we could have sent more than one if
		 * the target is sending us lots of nop-ins
		 */
		if (opcode != ISCSI_OP_NOOP_OUT)
			return 0;

		if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
			/*
			 * nop-out in response to target's nop-out rejected.
			 * Just resend.
			 */
			/* In RX path we are under back lock */
			spin_unlock(&conn->session->back_lock);
			spin_lock(&conn->session->frwd_lock);
			iscsi_send_nopout(conn,
					  (struct iscsi_nopin*)&rejected_pdu);
			spin_unlock(&conn->session->frwd_lock);
			spin_lock(&conn->session->back_lock);
		} else {
			struct iscsi_task *task;
			/*
			 * Our nop as ping got dropped. We know the target
			 * and transport are ok so just clean up
			 */
			task = iscsi_itt_to_task(conn, rejected_pdu.itt);
			if (!task) {
				iscsi_conn_printk(KERN_ERR, conn,
						  "Invalid pdu reject. Could "
						  "not lookup rejected task.\n");
				rc = ISCSI_ERR_BAD_ITT;
			} else
				rc = iscsi_nop_out_rsp(task,
					(struct iscsi_nopin*)&rejected_pdu,
					NULL, 0);
		}
		break;
	default:
		iscsi_conn_printk(KERN_ERR, conn,
				  "pdu (op 0x%x itt 0x%x) rejected. Reason "
				  "code 0x%x\n", rejected_pdu.opcode,
				  rejected_pdu.itt, reject->reason);
		break;
	}
	return rc;
}

/**
 * iscsi_itt_to_task - look up task by itt
 * @conn: iscsi connection
 * @itt: itt
 *
 * This should be used for mgmt tasks like login and nops, or if
 * the LDD's itt space does not include the session age.
 *
 * The session back_lock must be held.
 */
struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
{
	struct iscsi_session *session = conn->session;
	int i;

	if (itt == RESERVED_ITT)
		return NULL;

	if (session->tt->parse_pdu_itt)
		session->tt->parse_pdu_itt(conn, itt, &i, NULL);
	else
		i = get_itt(itt);
	if (i >= session->cmds_max)
		return NULL;

	return session->cmds[i];
}
EXPORT_SYMBOL_GPL(iscsi_itt_to_task);

/**
 * __iscsi_complete_pdu - complete pdu
 * @conn: iscsi conn
 * @hdr: iscsi header
 * @data: data buffer
 * @datalen: len of data buffer
 *
 * Completes pdu processing by freeing any resources allocated at
 * queuecommand or send generic. session back_lock must be held and verify
 * itt must have been called.
 */
int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
			 char *data, int datalen)
{
	struct iscsi_session *session = conn->session;
	int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
	struct iscsi_task *task;
	uint32_t itt;

	conn->last_recv = jiffies;
	rc = iscsi_verify_itt(conn, hdr->itt);
	if (rc)
		return rc;

	if (hdr->itt != RESERVED_ITT)
		itt = get_itt(hdr->itt);
	else
		itt = ~0U;

	ISCSI_DBG_SESSION(session, "[op 0x%x cid %d itt 0x%x len %d]\n",
			  opcode, conn->id, itt, datalen);

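	/*
	 * A reserved ITT means the PDU is not tied to one of our tasks:
	 * target-initiated NOP-In, Async Event and Reject PDUs are handled
	 * here without a task lookup.
	 */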
	if (itt == ~0U) {
		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);

		switch(opcode) {
		case ISCSI_OP_NOOP_IN:
			if (datalen) {
				rc = ISCSI_ERR_PROTO;
				break;
			}

			if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
				break;

			/* In RX path we are under back lock */
			spin_unlock(&session->back_lock);
			spin_lock(&session->frwd_lock);
			iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
			spin_unlock(&session->frwd_lock);
			spin_lock(&session->back_lock);
			break;
		case ISCSI_OP_REJECT:
			rc = iscsi_handle_reject(conn, hdr, data, datalen);
			break;
		case ISCSI_OP_ASYNC_EVENT:
			conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
			if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
				rc = ISCSI_ERR_CONN_FAILED;
			break;
		default:
			rc = ISCSI_ERR_BAD_OPCODE;
			break;
		}
		goto out;
	}

	switch(opcode) {
	case ISCSI_OP_SCSI_CMD_RSP:
	case ISCSI_OP_SCSI_DATA_IN:
		task = iscsi_itt_to_ctask(conn, hdr->itt);
		if (!task)
			return ISCSI_ERR_BAD_ITT;
		task->last_xfer = jiffies;
		break;
	case ISCSI_OP_R2T:
		/*
		 * LLD handles R2Ts if they need to.
		 */
		return 0;
	case ISCSI_OP_LOGOUT_RSP:
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
	case ISCSI_OP_SCSI_TMFUNC_RSP:
	case ISCSI_OP_NOOP_IN:
		task = iscsi_itt_to_task(conn, hdr->itt);
		if (!task)
			return ISCSI_ERR_BAD_ITT;
		break;
	default:
		return ISCSI_ERR_BAD_OPCODE;
	}

	switch(opcode) {
	case ISCSI_OP_SCSI_CMD_RSP:
		iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
		break;
	case ISCSI_OP_SCSI_DATA_IN:
		iscsi_data_in_rsp(conn, hdr, task);
		break;
	case ISCSI_OP_LOGOUT_RSP:
		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
		if (datalen) {
			rc = ISCSI_ERR_PROTO;
			break;
		}
		conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
		goto recv_pdu;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
		/*
		 * login related PDU's exp_statsn is handled in
		 * userspace
		 */
		goto recv_pdu;
	case ISCSI_OP_SCSI_TMFUNC_RSP:
		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
		if (datalen) {
			rc = ISCSI_ERR_PROTO;
			break;
		}

		iscsi_tmf_rsp(conn, hdr);
		iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
		break;
	case ISCSI_OP_NOOP_IN:
		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
		if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
			rc = ISCSI_ERR_PROTO;
			break;
		}
		conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;

		rc = iscsi_nop_out_rsp(task, (struct iscsi_nopin*)hdr,
				       data, datalen);
		break;
	default:
		rc = ISCSI_ERR_BAD_OPCODE;
		break;
	}

out:
	return rc;
recv_pdu:
	if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
		rc = ISCSI_ERR_CONN_FAILED;
	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
	return rc;
}
EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);

int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
		       char *data, int datalen)
{
	int rc;

	spin_lock(&conn->session->back_lock);
	rc = __iscsi_complete_pdu(conn, hdr, data, datalen);
|
|
|
spin_unlock(&conn->session->back_lock);
|
2006-04-07 02:13:41 +00:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
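The locking hierarchy described in the patch notes above boils down to one
rule: a frwd_lock critical section may nest a back_lock critical section,
never the reverse. The helper below is a minimal illustrative sketch, not
part of libiscsi; it assumes only the frwd_lock/back_lock members of
struct iscsi_session and the headers this file already includes.

/*
 * Illustrative only: the forward (TX) lock encloses the backward
 * (completion/RX) lock, matching the hierarchy described above.
 */
static void example_lock_nesting(struct iscsi_session *session)
{
	spin_lock_bh(&session->frwd_lock);	/* protect TX-side state */
	spin_lock_bh(&session->back_lock);	/* nested: allowed direction */
	/* e.g. return a task to the command pool here */
	spin_unlock_bh(&session->back_lock);
	spin_unlock_bh(&session->frwd_lock);	/* release in reverse order */
}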
|
|
|
|
|
2008-05-21 20:54:04 +00:00
|
|
|
int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
|
2006-04-07 02:13:41 +00:00
|
|
|
{
|
|
|
|
struct iscsi_session *session = conn->session;
|
2008-12-02 06:32:13 +00:00
|
|
|
int age = 0, i = 0;
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2008-05-21 20:54:04 +00:00
|
|
|
if (itt == RESERVED_ITT)
|
|
|
|
return 0;
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2008-12-02 06:32:13 +00:00
|
|
|
if (session->tt->parse_pdu_itt)
|
|
|
|
session->tt->parse_pdu_itt(conn, itt, &i, &age);
|
|
|
|
else {
|
|
|
|
i = get_itt(itt);
|
|
|
|
age = ((__force u32)itt >> ISCSI_AGE_SHIFT) & ISCSI_AGE_MASK;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (age != session->age) {
|
2008-05-21 20:54:04 +00:00
|
|
|
iscsi_conn_printk(KERN_ERR, conn,
|
|
|
|
"received itt %x expected session age (%x)\n",
|
2008-05-21 20:54:18 +00:00
|
|
|
(__force u32)itt, session->age);
|
2008-05-21 20:54:04 +00:00
|
|
|
return ISCSI_ERR_BAD_ITT;
|
|
|
|
}
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2008-05-21 20:54:06 +00:00
|
|
|
if (i >= session->cmds_max) {
|
|
|
|
iscsi_conn_printk(KERN_ERR, conn,
|
|
|
|
"received invalid itt index %u (max cmds "
|
|
|
|
"%u.\n", i, session->cmds_max);
|
|
|
|
return ISCSI_ERR_BAD_ITT;
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_verify_itt);
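When a transport does not supply a parse_pdu_itt callout, the itt checked
above is the one libiscsi itself packed from the task's pool index and the
session age. The fragment below is a hypothetical sketch of that encoding
side, using only the build_itt() helper and the same index/age layout the
decode path above relies on.

/* Hypothetical helper: pack a task's pool index and the session age
 * into an itt, the inverse of the get_itt()/age decode done above. */
static itt_t example_encode_itt(struct iscsi_task *task,
				struct iscsi_session *session)
{
	return build_itt(task->itt, session->age);
}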
|
|
|
|
|
2008-05-21 20:54:18 +00:00
|
|
|
/**
|
|
|
|
* iscsi_itt_to_ctask - look up ctask by itt
|
|
|
|
* @conn: iscsi connection
|
|
|
|
* @itt: itt
|
|
|
|
*
|
|
|
|
* This should be used for cmd tasks.
|
|
|
|
*
|
2014-02-07 06:41:38 +00:00
|
|
|
* The session back_lock must be held.
|
2008-05-21 20:54:18 +00:00
|
|
|
*/
|
|
|
|
struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
|
2008-05-21 20:54:04 +00:00
|
|
|
{
|
2008-05-21 20:54:09 +00:00
|
|
|
struct iscsi_task *task;
|
2008-05-21 20:54:04 +00:00
|
|
|
|
|
|
|
if (iscsi_verify_itt(conn, itt))
|
|
|
|
return NULL;
|
|
|
|
|
2008-05-21 20:54:18 +00:00
|
|
|
task = iscsi_itt_to_task(conn, itt);
|
|
|
|
if (!task || !task->sc)
|
2008-05-21 20:54:04 +00:00
|
|
|
return NULL;
|
|
|
|
|
2022-02-18 19:50:53 +00:00
|
|
|
if (iscsi_cmd(task->sc)->age != conn->session->age) {
|
2008-05-21 20:54:18 +00:00
|
|
|
iscsi_session_printk(KERN_ERR, conn->session,
|
|
|
|
"task's session age %d, expected %d\n",
|
2022-02-18 19:50:53 +00:00
|
|
|
iscsi_cmd(task->sc)->age, conn->session->age);
|
2008-05-21 20:54:04 +00:00
|
|
|
return NULL;
|
2008-05-21 20:54:18 +00:00
|
|
|
}
|
2008-05-21 20:54:04 +00:00
|
|
|
|
2008-05-21 20:54:09 +00:00
|
|
|
return task;
|
2008-05-21 20:54:04 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
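As the kernel-doc above says, iscsi_itt_to_ctask() must be called with the
session back_lock held, and the task it returns is only guaranteed to stay
valid while that lock is held. A hypothetical caller, not part of libiscsi,
could pin the task before dropping the lock as sketched below.

/* Hypothetical caller: look up a cmd task under back_lock and take a
 * reference so it can still be used once the lock is dropped. */
static struct iscsi_task *example_find_ctask(struct iscsi_conn *conn, itt_t itt)
{
	struct iscsi_task *task;

	spin_lock_bh(&conn->session->back_lock);
	task = iscsi_itt_to_ctask(conn, itt);
	if (task && !iscsi_get_task(task))
		task = NULL;			/* task already completing */
	spin_unlock_bh(&conn->session->back_lock);

	return task;	/* caller must drop the ref with iscsi_put_task() */
}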
|
|
|
|
|
2009-03-05 20:46:05 +00:00
|
|
|
void iscsi_session_failure(struct iscsi_session *session,
|
2008-09-24 16:46:10 +00:00
|
|
|
enum iscsi_err err)
|
|
|
|
{
|
|
|
|
struct iscsi_conn *conn;
|
|
|
|
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2008-09-24 16:46:10 +00:00
|
|
|
conn = session->leadconn;
|
|
|
|
if (session->state == ISCSI_STATE_TERMINATE || !conn) {
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2008-09-24 16:46:10 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2021-05-25 18:18:03 +00:00
|
|
|
iscsi_get_conn(conn->cls_conn);
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2008-09-24 16:46:10 +00:00
|
|
|
/*
|
|
|
|
* if the host is being removed bypass the connection
|
|
|
|
* recovery initialization because we are going to kill
|
|
|
|
* the session.
|
|
|
|
*/
|
|
|
|
if (err == ISCSI_ERR_INVALID_HOST)
|
|
|
|
iscsi_conn_error_event(conn->cls_conn, err);
|
|
|
|
else
|
|
|
|
iscsi_conn_failure(conn, err);
|
2021-05-25 18:18:03 +00:00
|
|
|
iscsi_put_conn(conn->cls_conn);
|
2008-09-24 16:46:10 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_session_failure);
|
|
|
|
|
2021-05-25 18:17:55 +00:00
|
|
|
static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
|
2006-04-07 02:13:41 +00:00
|
|
|
{
|
|
|
|
struct iscsi_session *session = conn->session;
|
|
|
|
|
2021-05-25 18:17:55 +00:00
|
|
|
if (session->state == ISCSI_STATE_FAILED)
|
|
|
|
return false;
|
2006-05-19 01:31:42 +00:00
|
|
|
|
2006-05-30 05:37:20 +00:00
|
|
|
if (conn->stop_stage == 0)
|
2006-04-07 02:13:41 +00:00
|
|
|
session->state = ISCSI_STATE_FAILED;
|
2008-09-24 16:46:10 +00:00
|
|
|
|
2022-04-08 00:13:11 +00:00
|
|
|
set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
|
|
|
|
set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
|
2021-05-25 18:17:55 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
|
|
|
|
{
|
|
|
|
struct iscsi_session *session = conn->session;
|
|
|
|
bool needs_evt;
|
|
|
|
|
|
|
|
spin_lock_bh(&session->frwd_lock);
|
|
|
|
needs_evt = iscsi_set_conn_failed(conn);
|
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
|
|
|
|
|
|
|
if (needs_evt)
|
|
|
|
iscsi_conn_error_event(conn->cls_conn, err);
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_conn_failure);
|
|
|
|
|
2007-05-30 17:57:18 +00:00
|
|
|
static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
|
|
|
|
{
|
|
|
|
struct iscsi_session *session = conn->session;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check for iSCSI window and take care of CmdSN wrap-around
|
|
|
|
*/
|
2007-07-26 17:46:48 +00:00
|
|
|
if (!iscsi_sna_lte(session->queued_cmdsn, session->max_cmdsn)) {
|
2009-03-05 20:45:58 +00:00
|
|
|
ISCSI_DBG_SESSION(session, "iSCSI CmdSN closed. ExpCmdSn "
|
|
|
|
"%u MaxCmdSN %u CmdSN %u/%u\n",
|
|
|
|
session->exp_cmdsn, session->max_cmdsn,
|
|
|
|
session->cmdsn, session->queued_cmdsn);
|
2007-05-30 17:57:18 +00:00
|
|
|
return -ENOSPC;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
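The check above depends on serial number arithmetic so the window test keeps
working after CmdSN wraps around 2^32: a sequence number is "less than or
equal to" another if the forward distance between them, computed modulo
2^32, is below 2^31. The standalone compare below is only an illustration of
that idea, not the exact helper libiscsi uses.

/* Illustrative serial-number compare: a <= b in modulo-2^32 space,
 * treating forward distances below 2^31 as "not yet wrapped past". */
static bool example_sna_lte(u32 a, u32 b)
{
	return (u32)(b - a) < 0x80000000U;
}

With such a compare, queued_cmdsn <= max_cmdsn stays true across a MaxCmdSN
wrap, which is exactly what the window check above needs.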
|
|
|
|
|
2021-02-07 04:46:01 +00:00
|
|
|
static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
|
|
|
|
bool was_requeue)
|
2007-05-30 17:57:18 +00:00
|
|
|
{
|
2007-12-13 18:43:20 +00:00
|
|
|
int rc;
|
2007-05-30 17:57:18 +00:00
|
|
|
|
2021-02-07 04:46:01 +00:00
|
|
|
if (!conn->task) {
|
2022-06-16 22:45:55 +00:00
|
|
|
/*
|
|
|
|
* Take a ref so we can access it after xmit_task().
|
|
|
|
*
|
|
|
|
* This should never fail because the failure paths will have
|
|
|
|
* stopped the xmit thread.
|
|
|
|
*/
|
|
|
|
if (!iscsi_get_task(task)) {
|
|
|
|
WARN_ON_ONCE(1);
|
|
|
|
return 0;
|
|
|
|
}
|
2021-02-07 04:46:01 +00:00
|
|
|
} else {
|
|
|
|
/* Already have a ref from when we failed to send it last call */
|
|
|
|
conn->task = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If this was a requeue for an R2T we have an extra ref on the task in
|
|
|
|
* case a bad target sends a cmd rsp before we have handled the task.
|
|
|
|
*/
|
|
|
|
if (was_requeue)
|
2022-06-16 22:45:56 +00:00
|
|
|
iscsi_put_task(task);
|
2021-02-07 04:46:01 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Do this after dropping the extra ref because if this was a requeue
|
|
|
|
* it's removed from that list and cleanup_queued_task would miss it.
|
|
|
|
*/
|
2022-04-08 00:13:11 +00:00
|
|
|
if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
|
2021-02-07 04:46:01 +00:00
|
|
|
/*
|
|
|
|
* Save the task and ref in case we weren't cleaning up this
|
|
|
|
* task and get woken up again.
|
|
|
|
*/
|
|
|
|
conn->task = task;
|
2019-02-13 05:21:39 +00:00
|
|
|
return -ENODATA;
|
|
|
|
}
|
2021-02-07 04:46:01 +00:00
|
|
|
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&conn->session->frwd_lock);
|
2008-05-21 20:54:09 +00:00
|
|
|
rc = conn->session->tt->xmit_task(task);
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&conn->session->frwd_lock);
|
2009-06-16 03:11:08 +00:00
|
|
|
if (!rc) {
|
2008-05-21 20:54:09 +00:00
|
|
|
/* done with this task */
|
2009-06-16 03:11:08 +00:00
|
|
|
task->last_xfer = jiffies;
|
2022-06-16 22:45:56 +00:00
|
|
|
} else {
|
2021-02-07 04:46:01 +00:00
|
|
|
/*
|
|
|
|
* get an extra ref that is released next time we access it
|
|
|
|
* as conn->task above.
|
|
|
|
*/
|
2022-06-16 22:45:55 +00:00
|
|
|
iscsi_get_task(task);
|
2021-02-07 04:46:01 +00:00
|
|
|
conn->task = task;
|
|
|
|
}
|
|
|
|
|
2022-06-16 22:45:56 +00:00
|
|
|
iscsi_put_task(task);
|
2007-05-30 17:57:18 +00:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2007-12-13 18:43:20 +00:00
|
|
|
/**
|
2008-05-21 20:54:09 +00:00
|
|
|
* iscsi_requeue_task - requeue task to run from session workqueue
|
|
|
|
* @task: task to requeue
|
2007-12-13 18:43:20 +00:00
|
|
|
*
|
2021-02-07 04:46:01 +00:00
|
|
|
* Callers must have taken a ref to the task that is going to be requeued.
|
2007-12-13 18:43:20 +00:00
|
|
|
*/
|
2008-05-21 20:54:09 +00:00
|
|
|
void iscsi_requeue_task(struct iscsi_task *task)
|
2007-12-13 18:43:20 +00:00
|
|
|
{
|
2008-05-21 20:54:09 +00:00
|
|
|
struct iscsi_conn *conn = task->conn;
|
2007-12-13 18:43:20 +00:00
|
|
|
|
2009-05-13 22:57:46 +00:00
|
|
|
/*
|
|
|
|
* this may be on the requeue list already if the xmit_task callout
|
|
|
|
* is handling the r2ts while we are adding new ones
|
|
|
|
*/
|
2021-02-07 04:46:01 +00:00
|
|
|
spin_lock_bh(&conn->session->frwd_lock);
|
|
|
|
if (list_empty(&task->running)) {
|
2009-05-13 22:57:46 +00:00
|
|
|
list_add_tail(&task->running, &conn->requeue);
|
2021-02-07 04:46:01 +00:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Don't need the extra ref since it's already requeued and
|
|
|
|
* has a ref.
|
|
|
|
*/
|
|
|
|
iscsi_put_task(task);
|
|
|
|
}
|
2022-06-16 22:45:49 +00:00
|
|
|
iscsi_conn_queue_xmit(conn);
|
2021-02-07 04:46:01 +00:00
|
|
|
spin_unlock_bh(&conn->session->frwd_lock);
|
2007-12-13 18:43:20 +00:00
|
|
|
}
|
2008-05-21 20:54:09 +00:00
|
|
|
EXPORT_SYMBOL_GPL(iscsi_requeue_task);
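Per the kernel-doc above, the caller must already hold a reference on the
task before requeueing it; iscsi_xmit_task() drops that extra reference when
it later services the requeue list. A hypothetical requeue site, not taken
from libiscsi, would pair the calls like this:

/* Hypothetical requeue site: pin the task, then hand it to the
 * session workqueue; the xmit path releases the extra reference. */
static void example_requeue(struct iscsi_task *task)
{
	if (!iscsi_get_task(task))
		return;			/* task is already being torn down */

	iscsi_requeue_task(task);
}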
|
2007-12-13 18:43:20 +00:00
|
|
|
|
2006-04-07 02:13:41 +00:00
|
|
|
/**
|
|
|
|
* iscsi_data_xmit - xmit any command into the scheduled connection
|
|
|
|
* @conn: iscsi connection
|
|
|
|
*
|
|
|
|
* Notes:
|
|
|
|
* The function can return -EAGAIN in which case the caller must
|
|
|
|
* re-schedule it later or recover. A '0' return code means
|
|
|
|
* successful xmit.
|
|
|
|
**/
|
|
|
|
static int iscsi_data_xmit(struct iscsi_conn *conn)
|
|
|
|
{
|
2009-11-11 22:34:32 +00:00
|
|
|
struct iscsi_task *task;
|
2006-05-30 05:37:28 +00:00
|
|
|
int rc = 0;
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&conn->session->frwd_lock);
|
2022-04-08 00:13:11 +00:00
|
|
|
if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
|
2009-03-05 20:45:58 +00:00
|
|
|
ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&conn->session->frwd_lock);
|
2006-05-30 05:37:28 +00:00
|
|
|
return -ENODATA;
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
|
|
|
|
2008-05-21 20:54:09 +00:00
|
|
|
if (conn->task) {
|
2021-02-07 04:46:01 +00:00
|
|
|
rc = iscsi_xmit_task(conn, conn->task, false);
|
2006-05-30 05:37:28 +00:00
|
|
|
if (rc)
|
2009-08-20 20:11:03 +00:00
|
|
|
goto done;
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
|
|
|
|
2007-05-30 17:57:18 +00:00
|
|
|
/*
|
|
|
|
* process mgmt pdus like nops before commands since we should
|
|
|
|
* only have one nop-out as a ping from us and targets should not
|
|
|
|
* overflow us with nop-ins
|
|
|
|
*/
|
|
|
|
check_mgmt:
|
2007-12-13 18:43:20 +00:00
|
|
|
while (!list_empty(&conn->mgmtqueue)) {
|
2021-02-07 04:46:01 +00:00
|
|
|
task = list_entry(conn->mgmtqueue.next, struct iscsi_task,
|
|
|
|
running);
|
|
|
|
list_del_init(&task->running);
|
|
|
|
if (iscsi_prep_mgmt_task(conn, task)) {
|
2014-02-07 06:41:38 +00:00
|
|
|
/* regular RX path uses back_lock */
|
|
|
|
spin_lock_bh(&conn->session->back_lock);
|
2021-02-07 04:46:01 +00:00
|
|
|
__iscsi_put_task(task);
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&conn->session->back_lock);
|
2007-12-13 18:43:26 +00:00
|
|
|
continue;
|
|
|
|
}
|
2021-02-07 04:46:01 +00:00
|
|
|
rc = iscsi_xmit_task(conn, task, false);
|
2007-05-30 17:57:18 +00:00
|
|
|
if (rc)
|
2009-08-20 20:11:03 +00:00
|
|
|
goto done;
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
|
|
|
|
2022-06-07 13:19:53 +00:00
|
|
|
check_requeue:
|
|
|
|
while (!list_empty(&conn->requeue)) {
|
|
|
|
/*
|
|
|
|
* we always do fastlogout - conn stop code will clean up.
|
|
|
|
*/
|
|
|
|
if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
|
|
|
|
break;
|
|
|
|
|
|
|
|
task = list_entry(conn->requeue.next, struct iscsi_task,
|
|
|
|
running);
|
|
|
|
|
|
|
|
if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
|
|
|
|
break;
|
|
|
|
|
|
|
|
list_del_init(&task->running);
|
|
|
|
rc = iscsi_xmit_task(conn, task, true);
|
|
|
|
if (rc)
|
|
|
|
goto done;
|
|
|
|
if (!list_empty(&conn->mgmtqueue))
|
|
|
|
goto check_mgmt;
|
|
|
|
}
|
|
|
|
|
2007-12-13 18:43:20 +00:00
|
|
|
/* process pending command queue */
|
2009-05-13 22:57:46 +00:00
|
|
|
while (!list_empty(&conn->cmdqueue)) {
|
2021-02-07 04:46:01 +00:00
|
|
|
task = list_entry(conn->cmdqueue.next, struct iscsi_task,
|
|
|
|
running);
|
|
|
|
list_del_init(&task->running);
|
2007-12-13 18:43:26 +00:00
|
|
|
if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
|
2021-02-07 04:46:01 +00:00
|
|
|
fail_scsi_task(task, DID_IMM_RETRY);
|
2007-12-13 18:43:26 +00:00
|
|
|
continue;
|
|
|
|
}
|
2021-02-07 04:46:01 +00:00
|
|
|
rc = iscsi_prep_scsi_cmd_pdu(task);
|
2008-12-02 06:32:05 +00:00
|
|
|
if (rc) {
|
2021-02-07 04:46:00 +00:00
|
|
|
if (rc == -ENOMEM || rc == -EACCES)
|
2021-02-07 04:46:01 +00:00
|
|
|
fail_scsi_task(task, DID_IMM_RETRY);
|
2021-02-07 04:46:00 +00:00
|
|
|
else
|
2021-02-07 04:46:01 +00:00
|
|
|
fail_scsi_task(task, DID_ABORT);
|
2007-12-13 18:43:23 +00:00
|
|
|
continue;
|
|
|
|
}
|
2021-02-07 04:46:01 +00:00
|
|
|
rc = iscsi_xmit_task(conn, task, false);
|
2007-05-30 17:57:18 +00:00
|
|
|
if (rc)
|
2009-08-20 20:11:03 +00:00
|
|
|
goto done;
|
2007-05-30 17:57:18 +00:00
|
|
|
/*
|
2008-05-21 20:54:09 +00:00
|
|
|
* we could continuously get new task requests so
|
2007-05-30 17:57:18 +00:00
|
|
|
* we need to check the mgmt queue for nops that need to
|
|
|
|
* be sent to avoid starvation
|
|
|
|
*/
|
2007-12-13 18:43:20 +00:00
|
|
|
if (!list_empty(&conn->mgmtqueue))
|
|
|
|
goto check_mgmt;
|
2022-06-07 13:19:53 +00:00
|
|
|
if (!list_empty(&conn->requeue))
|
|
|
|
goto check_requeue;
|
2007-12-13 18:43:20 +00:00
|
|
|
}
|
|
|
|
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&conn->session->frwd_lock);
|
2006-05-30 05:37:28 +00:00
|
|
|
return -ENODATA;
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2009-08-20 20:11:03 +00:00
|
|
|
done:
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&conn->session->frwd_lock);
|
2006-05-30 05:37:28 +00:00
|
|
|
return rc;
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
|
|
|
|
2006-11-22 14:57:56 +00:00
|
|
|
static void iscsi_xmitworker(struct work_struct *work)
|
2006-04-07 02:13:41 +00:00
|
|
|
{
|
2006-11-22 14:57:56 +00:00
|
|
|
struct iscsi_conn *conn =
|
|
|
|
container_of(work, struct iscsi_conn, xmitwork);
|
2006-05-30 05:37:28 +00:00
|
|
|
int rc;
|
2006-04-07 02:13:41 +00:00
|
|
|
/*
|
|
|
|
* serialize Xmit worker on a per-connection basis.
|
|
|
|
*/
|
2006-05-30 05:37:28 +00:00
|
|
|
do {
|
|
|
|
rc = iscsi_data_xmit(conn);
|
|
|
|
} while (rc >= 0 || rc == -EAGAIN);
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
|
|
|
|
2008-12-02 06:32:05 +00:00
|
|
|
static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
|
|
|
|
struct scsi_cmnd *sc)
|
|
|
|
{
|
|
|
|
struct iscsi_task *task;
|
|
|
|
|
2009-12-21 22:37:28 +00:00
|
|
|
if (!kfifo_out(&conn->session->cmdpool.queue,
|
2008-12-02 06:32:05 +00:00
|
|
|
(void *) &task, sizeof(void *)))
|
|
|
|
return NULL;
|
|
|
|
|
2022-02-18 19:50:53 +00:00
|
|
|
iscsi_cmd(sc)->age = conn->session->age;
|
|
|
|
iscsi_cmd(sc)->task = task;
|
2008-12-02 06:32:05 +00:00
|
|
|
|
2017-03-09 11:46:58 +00:00
|
|
|
refcount_set(&task->refcount, 1);
|
2008-12-02 06:32:05 +00:00
|
|
|
task->state = ISCSI_TASK_PENDING;
|
|
|
|
task->conn = conn;
|
|
|
|
task->sc = sc;
|
2009-06-16 03:11:08 +00:00
|
|
|
task->have_checked_conn = false;
|
|
|
|
task->last_timeout = jiffies;
|
|
|
|
task->last_xfer = jiffies;
|
2014-03-05 17:43:49 +00:00
|
|
|
task->protected = false;
|
2008-12-02 06:32:05 +00:00
|
|
|
INIT_LIST_HEAD(&task->running);
|
|
|
|
return task;
|
|
|
|
}
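Allocation above pulls a task pointer out of the session command pool's
kfifo with kfifo_out(); the matching free path elsewhere in this file pushes
the pointer back with kfifo_in() under the back_lock. The fragment below is
only a sketch of that return side, under the assumption that it runs with
the back_lock held.

/* Sketch of the matching free side: push the task pointer back into
 * the command pool; assumed to be called with back_lock held. */
static void example_free_task(struct iscsi_session *session,
			      struct iscsi_task *task)
{
	kfifo_in(&session->cmdpool.queue, (void *) &task, sizeof(void *));
}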
|
|
|
|
|
2006-04-07 02:13:41 +00:00
|
|
|
enum {
|
|
|
|
FAILURE_BAD_HOST = 1,
|
|
|
|
FAILURE_SESSION_FAILED,
|
|
|
|
FAILURE_SESSION_FREED,
|
|
|
|
FAILURE_WINDOW_CLOSED,
|
2006-08-31 22:09:25 +00:00
|
|
|
FAILURE_OOM,
|
2006-04-07 02:13:41 +00:00
|
|
|
FAILURE_SESSION_TERMINATE,
|
2006-05-19 01:31:42 +00:00
|
|
|
FAILURE_SESSION_IN_RECOVERY,
|
2006-04-07 02:13:41 +00:00
|
|
|
FAILURE_SESSION_RECOVERY_TIMEOUT,
|
2007-12-13 18:43:26 +00:00
|
|
|
FAILURE_SESSION_LOGGING_OUT,
|
2008-01-31 19:36:43 +00:00
|
|
|
FAILURE_SESSION_NOT_READY,
|
2006-04-07 02:13:41 +00:00
|
|
|
};
|
|
|
|
|
2010-12-31 08:22:21 +00:00
|
|
|
int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
|
2006-04-07 02:13:41 +00:00
|
|
|
{
|
2008-05-21 20:53:59 +00:00
|
|
|
struct iscsi_cls_session *cls_session;
|
2009-05-13 22:57:48 +00:00
|
|
|
struct iscsi_host *ihost;
|
2006-04-07 02:13:41 +00:00
|
|
|
int reason = 0;
|
|
|
|
struct iscsi_session *session;
|
|
|
|
struct iscsi_conn *conn;
|
2008-05-21 20:54:09 +00:00
|
|
|
struct iscsi_task *task = NULL;
|
2006-04-07 02:13:41 +00:00
|
|
|
|
|
|
|
sc->result = 0;
|
2022-02-18 19:50:53 +00:00
|
|
|
iscsi_cmd(sc)->task = NULL;
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2009-05-13 22:57:48 +00:00
|
|
|
ihost = shost_priv(host);
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2008-05-21 20:53:59 +00:00
|
|
|
cls_session = starget_to_session(scsi_target(sc->device));
|
|
|
|
session = cls_session->dd_data;
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2008-05-21 20:53:59 +00:00
|
|
|
reason = iscsi_session_chkready(cls_session);
|
2008-01-31 19:36:43 +00:00
|
|
|
if (reason) {
|
|
|
|
sc->result = reason;
|
|
|
|
goto fault;
|
|
|
|
}
|
|
|
|
|
2009-05-13 22:57:47 +00:00
|
|
|
if (session->state != ISCSI_STATE_LOGGED_IN) {
|
2006-05-19 01:31:42 +00:00
|
|
|
/*
|
|
|
|
* to handle the race between when we set the recovery state
|
|
|
|
* and block the session we requeue here (commands could
|
|
|
|
* be entering our queuecommand while a block is starting
|
|
|
|
* up because the block code is not locked)
|
|
|
|
*/
|
2007-12-13 18:43:32 +00:00
|
|
|
switch (session->state) {
|
2009-05-13 22:57:47 +00:00
|
|
|
case ISCSI_STATE_FAILED:
|
scsi: libiscsi: Allow sd_shutdown on bad transport
If, for any reason, userland shuts down the iscsi transport interfaces
before proper logouts - like when logging in to LUNs manually without
logging out on server shutdown, or when automated scripts can't
umount/log out from logged-in LUNs - the kernel will hang forever in its
sd_sync_cache() logic, after issuing the SYNCHRONIZE_CACHE cmd to all
still-existing paths.
PID: 1 TASK: ffff8801a69b8000 CPU: 1 COMMAND: "systemd-shutdow"
#0 [ffff8801a69c3a30] __schedule at ffffffff8183e9ee
#1 [ffff8801a69c3a80] schedule at ffffffff8183f0d5
#2 [ffff8801a69c3a98] schedule_timeout at ffffffff81842199
#3 [ffff8801a69c3b40] io_schedule_timeout at ffffffff8183e604
#4 [ffff8801a69c3b70] wait_for_completion_io_timeout at ffffffff8183fc6c
#5 [ffff8801a69c3bd0] blk_execute_rq at ffffffff813cfe10
#6 [ffff8801a69c3c88] scsi_execute at ffffffff815c3fc7
#7 [ffff8801a69c3cc8] scsi_execute_req_flags at ffffffff815c60fe
#8 [ffff8801a69c3d30] sd_sync_cache at ffffffff815d37d7
#9 [ffff8801a69c3da8] sd_shutdown at ffffffff815d3c3c
This happens because iscsi_eh_cmd_timed_out(), the transport layer
timeout helper, keeps telling the queue timeout function (scsi_times_out)
to reset the request timer over and over, until the session is back in
the logged-in state. Unfortunately, during server shutdown, this may
never happen again.
Another option would be not to handle the issue in the transport layer
at all. That would trigger the error handler logic, which would likewise
need the session to be logged in again.
The best option, for such a case, is to tell the upper layers that the
command was handled in the transport layer error handler helper, marking
it DID_NO_CONNECT, which allows completion and reports the problem. (A
hedged sketch of this check follows below.)
After the session is marked ISCSI_STATE_FAILED, due to the first timeout
during the server shutdown phase, all subsequent cmds will fail to be
queued, allowing the upper layers to fail faster.
Signed-off-by: Rafael David Tinoco <rafael.tinoco@canonical.com>
Reviewed-by: Lee Duncan <lduncan@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2017-12-07 21:59:13 +00:00
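The queuecommand() side of this change appears in the code that follows; the
error-handler side can be sketched roughly as below. This is a hedged
illustration of the behaviour the commit describes, not the exact upstream
hunk from iscsi_eh_cmd_timed_out(); the placement, the exact state test, and
the local names rc and done are assumptions.

/*
 * Rough sketch (not the exact upstream hunk) of the timeout-helper
 * behaviour described above: once the session has failed during system
 * shutdown, complete the command with DID_NO_CONNECT instead of
 * resetting the block layer timer forever.
 */
if (session->state == ISCSI_STATE_FAILED &&
    unlikely(system_state != SYSTEM_RUNNING)) {
	sc->result = DID_NO_CONNECT << 16;
	rc = BLK_EH_DONE;	/* command handled; completion can proceed */
	goto done;
}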
|
|
|
/*
|
|
|
|
* cmds should fail during shutdown, if the session
|
|
|
|
* state is bad, allowing completion to happen
|
|
|
|
*/
|
|
|
|
if (unlikely(system_state != SYSTEM_RUNNING)) {
|
|
|
|
reason = FAILURE_SESSION_FAILED;
|
|
|
|
sc->result = DID_NO_CONNECT << 16;
|
|
|
|
break;
|
|
|
|
}
|
2020-08-23 22:36:59 +00:00
|
|
|
fallthrough;
|
2007-12-13 18:43:32 +00:00
|
|
|
case ISCSI_STATE_IN_RECOVERY:
|
2006-05-19 01:31:42 +00:00
|
|
|
reason = FAILURE_SESSION_IN_RECOVERY;
|
2009-05-13 22:57:47 +00:00
|
|
|
sc->result = DID_IMM_RETRY << 16;
|
|
|
|
break;
|
2007-12-13 18:43:32 +00:00
|
|
|
case ISCSI_STATE_LOGGING_OUT:
|
|
|
|
reason = FAILURE_SESSION_LOGGING_OUT;
|
2009-05-13 22:57:47 +00:00
|
|
|
sc->result = DID_IMM_RETRY << 16;
|
|
|
|
break;
|
2007-12-13 18:43:26 +00:00
|
|
|
case ISCSI_STATE_RECOVERY_FAILED:
|
2006-05-19 01:31:42 +00:00
|
|
|
reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
|
2008-08-19 23:45:26 +00:00
|
|
|
sc->result = DID_TRANSPORT_FAILFAST << 16;
|
2007-12-13 18:43:26 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_STATE_TERMINATE:
|
2006-05-19 01:31:42 +00:00
|
|
|
reason = FAILURE_SESSION_TERMINATE;
|
2008-01-31 19:36:43 +00:00
|
|
|
sc->result = DID_NO_CONNECT << 16;
|
2007-12-13 18:43:26 +00:00
|
|
|
break;
|
|
|
|
default:
|
2006-05-19 01:31:42 +00:00
|
|
|
reason = FAILURE_SESSION_FREED;
|
2008-01-31 19:36:43 +00:00
|
|
|
sc->result = DID_NO_CONNECT << 16;
|
2007-12-13 18:43:26 +00:00
|
|
|
}
|
2006-04-07 02:13:41 +00:00
|
|
|
goto fault;
|
|
|
|
}
|
|
|
|
|
|
|
|
conn = session->leadconn;
|
2006-10-16 22:09:39 +00:00
|
|
|
if (!conn) {
|
|
|
|
reason = FAILURE_SESSION_FREED;
|
2008-01-31 19:36:43 +00:00
|
|
|
sc->result = DID_NO_CONNECT << 16;
|
2006-10-16 22:09:39 +00:00
|
|
|
goto fault;
|
|
|
|
}
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2022-04-08 00:13:11 +00:00
|
|
|
if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
|
2009-09-05 02:05:33 +00:00
|
|
|
reason = FAILURE_SESSION_IN_RECOVERY;
|
2017-10-09 11:33:19 +00:00
|
|
|
sc->result = DID_REQUEUE << 16;
|
2009-09-05 02:05:33 +00:00
|
|
|
goto fault;
|
|
|
|
}
|
|
|
|
|
2007-05-30 17:57:18 +00:00
|
|
|
if (iscsi_check_cmdsn_window_closed(conn)) {
|
|
|
|
reason = FAILURE_WINDOW_CLOSED;
|
|
|
|
goto reject;
|
|
|
|
}
|
|
|
|
|
2008-12-02 06:32:05 +00:00
|
|
|
task = iscsi_alloc_task(conn, sc);
|
|
|
|
if (!task) {
|
2006-08-31 22:09:25 +00:00
|
|
|
reason = FAILURE_OOM;
|
|
|
|
goto reject;
|
|
|
|
}
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2009-05-13 22:57:48 +00:00
|
|
|
if (!ihost->workq) {
|
2008-12-02 06:32:05 +00:00
|
|
|
reason = iscsi_prep_scsi_cmd_pdu(task);
|
|
|
|
if (reason) {
|
2009-11-11 22:34:32 +00:00
|
|
|
if (reason == -ENOMEM || reason == -EACCES) {
|
2008-12-02 06:32:05 +00:00
|
|
|
reason = FAILURE_OOM;
|
|
|
|
goto prepd_reject;
|
|
|
|
} else {
|
|
|
|
sc->result = DID_ABORT << 16;
|
|
|
|
goto prepd_fault;
|
|
|
|
}
|
2008-05-21 20:54:05 +00:00
|
|
|
}
|
2008-05-21 20:54:09 +00:00
|
|
|
if (session->tt->xmit_task(task)) {
|
2009-08-20 20:10:58 +00:00
|
|
|
session->cmdsn--;
|
2008-05-21 20:54:05 +00:00
|
|
|
reason = FAILURE_SESSION_NOT_READY;
|
2008-12-02 06:32:05 +00:00
|
|
|
goto prepd_reject;
|
2008-05-21 20:54:05 +00:00
|
|
|
}
|
2009-05-13 22:57:46 +00:00
|
|
|
} else {
|
|
|
|
list_add_tail(&task->running, &conn->cmdqueue);
|
2022-06-16 22:45:49 +00:00
|
|
|
iscsi_conn_queue_xmit(conn);
|
2009-05-13 22:57:46 +00:00
|
|
|
}
|
2008-05-21 20:54:05 +00:00
|
|
|
|
|
|
|
session->queued_cmdsn++;
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2006-04-07 02:13:41 +00:00
|
|
|
return 0;
|
|
|
|
|
2008-12-02 06:32:05 +00:00
|
|
|
prepd_reject:
|
2019-02-25 17:41:30 +00:00
|
|
|
spin_lock_bh(&session->back_lock);
|
2010-12-31 08:22:21 +00:00
|
|
|
iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
|
2019-02-25 17:41:30 +00:00
|
|
|
spin_unlock_bh(&session->back_lock);
|
2006-04-07 02:13:41 +00:00
|
|
|
reject:
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2009-03-05 20:45:58 +00:00
|
|
|
ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
|
|
|
|
sc->cmnd[0], reason);
|
2008-08-17 20:24:43 +00:00
|
|
|
return SCSI_MLQUEUE_TARGET_BUSY;
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2008-12-02 06:32:05 +00:00
|
|
|
prepd_fault:
|
2019-02-25 17:41:30 +00:00
|
|
|
spin_lock_bh(&session->back_lock);
|
2010-12-31 08:22:21 +00:00
|
|
|
iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
|
2019-02-25 17:41:30 +00:00
|
|
|
spin_unlock_bh(&session->back_lock);
|
2006-04-07 02:13:41 +00:00
|
|
|
fault:
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2009-03-05 20:45:58 +00:00
|
|
|
ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
|
|
|
|
sc->cmnd[0], reason);
|
2019-01-29 08:33:07 +00:00
|
|
|
scsi_set_resid(sc, scsi_bufflen(sc));
|
2021-10-07 20:28:39 +00:00
|
|
|
scsi_done(sc);
|
2006-04-07 02:13:41 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_queuecommand);
|
|
|
|
|
2009-04-21 20:32:32 +00:00
|
|
|
int iscsi_target_alloc(struct scsi_target *starget)
|
|
|
|
{
|
|
|
|
struct iscsi_cls_session *cls_session = starget_to_session(starget);
|
|
|
|
struct iscsi_session *session = cls_session->dd_data;
|
|
|
|
|
|
|
|
starget->can_queue = session->scsi_cmds_max;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_target_alloc);
|
|
|
|
|
2017-10-11 23:25:40 +00:00
|
|
|
static void iscsi_tmf_timedout(struct timer_list *t)
|
2006-04-07 02:13:41 +00:00
|
|
|
{
|
2021-05-25 18:18:06 +00:00
|
|
|
struct iscsi_session *session = from_timer(session, t, tmf_timer);
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock(&session->frwd_lock);
|
2021-05-25 18:18:06 +00:00
|
|
|
if (session->tmf_state == TMF_QUEUED) {
|
|
|
|
session->tmf_state = TMF_TIMEDOUT;
|
2009-06-16 03:11:10 +00:00
|
|
|
ISCSI_DBG_EH(session, "tmf timedout\n");
|
2006-04-07 02:13:41 +00:00
|
|
|
/* unblock eh_abort() */
|
2021-05-25 18:18:06 +00:00
|
|
|
wake_up(&session->ehwait);
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock(&session->frwd_lock);
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
|
|
|
|
2008-05-21 20:54:09 +00:00
|
|
|
static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
|
2007-12-13 18:43:30 +00:00
|
|
|
struct iscsi_tm *hdr, int age,
|
|
|
|
int timeout)
|
2018-07-30 21:40:18 +00:00
|
|
|
__must_hold(&session->frwd_lock)
|
2006-04-07 02:13:41 +00:00
|
|
|
{
|
|
|
|
struct iscsi_session *session = conn->session;
|
|
|
|
|
2022-06-16 22:45:57 +00:00
|
|
|
if (__iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0)) {
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2010-12-31 08:22:18 +00:00
|
|
|
iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n");
|
2006-04-07 02:13:41 +00:00
|
|
|
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2007-05-30 17:57:18 +00:00
|
|
|
return -EPERM;
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
2007-12-13 18:43:20 +00:00
|
|
|
conn->tmfcmd_pdus_cnt++;
|
2021-05-25 18:18:06 +00:00
|
|
|
session->tmf_timer.expires = timeout * HZ + jiffies;
|
|
|
|
add_timer(&session->tmf_timer);
|
2009-06-16 03:11:10 +00:00
|
|
|
ISCSI_DBG_EH(session, "tmf set timeout\n");
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2007-08-15 06:38:30 +00:00
|
|
|
mutex_unlock(&session->eh_mutex);
|
2006-04-07 02:13:41 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* block eh thread until:
|
|
|
|
*
|
2007-12-13 18:43:20 +00:00
|
|
|
* 1) tmf response
|
|
|
|
* 2) tmf timeout
|
2006-04-07 02:13:41 +00:00
|
|
|
* 3) session is terminated or restarted or userspace has
|
|
|
|
* given up on recovery
|
|
|
|
*/
|
2021-05-25 18:18:06 +00:00
|
|
|
wait_event_interruptible(session->ehwait, age != session->age ||
|
2006-04-07 02:13:41 +00:00
|
|
|
session->state != ISCSI_STATE_LOGGED_IN ||
|
2021-05-25 18:18:06 +00:00
|
|
|
session->tmf_state != TMF_QUEUED);
|
2006-04-07 02:13:41 +00:00
|
|
|
if (signal_pending(current))
|
|
|
|
flush_signals(current);
|
2021-05-25 18:18:06 +00:00
|
|
|
del_timer_sync(&session->tmf_timer);
|
2007-12-13 18:43:20 +00:00
|
|
|
|
2007-08-15 06:38:30 +00:00
|
|
|
mutex_lock(&session->eh_mutex);
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2008-05-21 20:54:09 +00:00
|
|
|
/* if the session drops it will clean up the task */
|
2007-12-13 18:43:20 +00:00
|
|
|
if (age != session->age ||
|
|
|
|
session->state != ISCSI_STATE_LOGGED_IN)
|
|
|
|
return -ENOTCONN;
|
2006-04-07 02:13:41 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-12-13 18:43:20 +00:00
|
|
|
/*
|
scsi: libiscsi: Fix iscsi_task use after free()
The following bug was reported and debugged by wubo40@huawei.com:
When testing a 4.18 kernel, a NULL pointer dereference occurs in
iscsi_eh_cmd_timed_out(). This bug most likely still exists upstream.
The analysis is as follows:
1) For some reason an I/O command did not complete within the timeout
period. The block layer timer fires and calls scsi_times_out() to handle
the I/O timeout. At the same time the command completes.
2) scsi_times_out() calls iscsi_eh_cmd_timed_out() to process the
timeout. Although there is a NULL check for the task, the task has not
been released yet at this point.
3) iscsi_complete_task() calls __iscsi_put_task(). The task reference
count reaches zero, so iscsi_free_task() frees the task and sets
sc->SCp.ptr = NULL. Since iscsi_eh_cmd_timed_out() has already passed the
NULL check, a NULL dereference can still occur.
CPU0                                              CPU3
|- scsi_times_out()                               |- iscsi_complete_task()
|                                                 |
|- iscsi_eh_cmd_timed_out()                       |- __iscsi_put_task()
|                                                 |
|- task = sc->SCp.ptr, task is not NULL,          |- iscsi_free_task(task)
|  check passed                                   |
|                                                 |-> sc->SCp.ptr = NULL
|                                                 |
|- task is NULL now, NULL pointer dereference     |
|                                                 |
\|/                                               \|/
Calltrace:
[380751.840862] BUG: unable to handle kernel NULL pointer dereference at
0000000000000138
[380751.843709] PGD 0 P4D 0
[380751.844770] Oops: 0000 [#1] SMP PTI
[380751.846283] CPU: 0 PID: 403 Comm: kworker/0:1H Kdump: loaded
Tainted: G
[380751.851467] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996)
[380751.856521] Workqueue: kblockd blk_mq_timeout_work
[380751.858527] RIP: 0010:iscsi_eh_cmd_timed_out+0x15e/0x2e0 [libiscsi]
[380751.861129] Code: 83 ea 01 48 8d 74 d0 08 48 8b 10 48 8b 4a 50 48 85
c9 74 2c 48 39 d5 74
[380751.868811] RSP: 0018:ffffc1e280a5fd58 EFLAGS: 00010246
[380751.870978] RAX: ffff9fd1e84e15e0 RBX: ffff9fd1e84e6dd0 RCX:
0000000116acc580
[380751.873791] RDX: ffff9fd1f97a9400 RSI: ffff9fd1e84e1800 RDI:
ffff9fd1e4d6d420
[380751.876059] RBP: ffff9fd1e4d49000 R08: 0000000116acc580 R09:
0000000116acc580
[380751.878284] R10: 0000000000000000 R11: 0000000000000000 R12:
ffff9fd1e6e931e8
[380751.880500] R13: ffff9fd1e84e6ee0 R14: 0000000000000010 R15:
0000000000000003
[380751.882687] FS: 0000000000000000(0000) GS:ffff9fd1fac00000(0000)
knlGS:0000000000000000
[380751.885236] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[380751.887059] CR2: 0000000000000138 CR3: 000000011860a001 CR4:
00000000003606f0
[380751.889308] DR0: 0000000000000000 DR1: 0000000000000000 DR2:
0000000000000000
[380751.891523] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7:
0000000000000400
[380751.893738] Call Trace:
[380751.894639] scsi_times_out+0x60/0x1c0
[380751.895861] blk_mq_check_expired+0x144/0x200
[380751.897302] ? __switch_to_asm+0x35/0x70
[380751.898551] blk_mq_queue_tag_busy_iter+0x195/0x2e0
[380751.900091] ? __blk_mq_requeue_request+0x100/0x100
[380751.901611] ? __switch_to_asm+0x41/0x70
[380751.902853] ? __blk_mq_requeue_request+0x100/0x100
[380751.904398] blk_mq_timeout_work+0x54/0x130
[380751.905740] process_one_work+0x195/0x390
[380751.907228] worker_thread+0x30/0x390
[380751.908713] ? process_one_work+0x390/0x390
[380751.910350] kthread+0x10d/0x130
[380751.911470] ? kthread_flush_work_fn+0x10/0x10
[380751.913007] ret_from_fork+0x35/0x40
crash> dis -l iscsi_eh_cmd_timed_out+0x15e
xxxxx/drivers/scsi/libiscsi.c: 2062
1970 enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
{
...
1984         spin_lock_bh(&session->frwd_lock);
1985         task = (struct iscsi_task *)sc->SCp.ptr;
1986         if (!task) {
1987                 /*
1988                  * Raced with completion. Blk layer has taken ownership
1989                  * so let timeout code complete it now.
1990                  */
1991                 rc = BLK_EH_DONE;
1992                 goto done;
1993         }
...
2052         for (i = 0; i < conn->session->cmds_max; i++) {
2053                 running_task = conn->session->cmds[i];
2054                 if (!running_task->sc || running_task == task ||
2055                     running_task->state != ISCSI_TASK_RUNNING)
2056                         continue;
2057
2058                 /*
2059                  * Only check if cmds started before this one have made
2060                  * progress, or this could never fail
2061                  */
2062                 if (time_after(running_task->sc->jiffies_at_alloc,
2063                                task->sc->jiffies_at_alloc))   <---
2064                         continue;
2065
...
}
crash> struct scsi_cmnd ffff9fd1e6e931e8
struct scsi_cmnd {
  ...
  SCp = {
    ptr = 0x0,   <--- iscsi_task
    this_residual = 0,
    ...
  },
}
To prevent this, we take a reference to the cmd under the back (completion)
lock, so that if the completion side calls iscsi_complete_task() on the task
while the timer/EH paths are not holding the back_lock, it will not be freed
from under us. (A hedged sketch of this pattern follows below.)
Note that this requires the previous patch, "scsi: libiscsi: Drop
taskqueuelock", because bnx2i sleeps in its cleanup_task callout if the cmd
is aborted. If the EH/timer and completion paths are racing, we don't know
which path will do the last put. The previous patch moved the operations we
needed to do under the forward lock into cleanup_queued_task. Once that has
run we can drop the forward lock for the cmd, and bnx2i no longer has to
worry about whether the EH, timer, or completion path did the last put and
whether the forward lock is held, since it won't be.
Link: https://lore.kernel.org/r/20210207044608.27585-4-michael.christie@oracle.com
Reported-by: Wu Bo <wubo40@huawei.com>
Reviewed-by: Lee Duncan <lduncan@suse.com>
Signed-off-by: Mike Christie <michael.christie@oracle.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2021-02-07 04:46:02 +00:00
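A hedged sketch of the take-a-reference-under-the-back-lock pattern this
commit describes. The function name example_eh_timed_out is hypothetical and
the real timeout/progress checks are elided; iscsi_get_task(),
iscsi_put_task() and the back_lock field are the libiscsi primitives
referenced elsewhere in this file.

/*
 * Illustrative-only sketch (not the upstream function) of the pattern
 * this commit describes for the timer/EH path: pin the task under the
 * back (completion) lock before inspecting it.
 */
static enum blk_eh_timer_return example_eh_timed_out(struct iscsi_session *session,
						     struct iscsi_task *task)
{
	spin_lock_bh(&session->back_lock);
	if (!task || !iscsi_get_task(task)) {
		/* Raced with completion: the last put already ran. */
		spin_unlock_bh(&session->back_lock);
		return BLK_EH_DONE;
	}
	spin_unlock_bh(&session->back_lock);

	/* The extra reference keeps the task alive while it is examined. */
	/* ... timeout / progress checks would go here ... */

	iscsi_put_task(task);	/* drop the reference taken above */
	return BLK_EH_RESET_TIMER;
}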
|
|
|
* Fail commands. session frwd lock held and xmit thread flushed.
|
2007-12-13 18:43:20 +00:00
|
|
|
*/
|
2014-06-25 13:27:36 +00:00
|
|
|
static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
|
2007-12-13 18:43:20 +00:00
|
|
|
{
|
2021-02-07 04:46:02 +00:00
|
|
|
struct iscsi_session *session = conn->session;
|
2009-05-13 22:57:46 +00:00
|
|
|
struct iscsi_task *task;
|
|
|
|
int i;
|
2007-12-13 18:43:20 +00:00
|
|
|
|
2022-06-16 22:45:55 +00:00
|
|
|
restart_cmd_loop:
|
2021-02-07 04:46:02 +00:00
|
|
|
spin_lock_bh(&session->back_lock);
|
|
|
|
for (i = 0; i < session->cmds_max; i++) {
|
|
|
|
task = session->cmds[i];
|
2009-05-13 22:57:46 +00:00
|
|
|
if (!task->sc || task->state == ISCSI_TASK_FREE)
|
|
|
|
continue;
|
2007-12-13 18:43:20 +00:00
|
|
|
|
2009-05-13 22:57:46 +00:00
|
|
|
if (lun != -1 && lun != task->sc->device->lun)
|
|
|
|
continue;
|
2022-06-16 22:45:55 +00:00
|
|
|
/*
|
|
|
|
* The cmd is completing but if this is called from an eh
|
|
|
|
* callout path then when we return scsi-ml owns the cmd. Wait
|
|
|
|
* for the completion path to finish freeing the cmd.
|
|
|
|
*/
|
|
|
|
if (!iscsi_get_task(task)) {
|
|
|
|
spin_unlock_bh(&session->back_lock);
|
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
|
|
|
udelay(ISCSI_CMD_COMPL_WAIT);
|
|
|
|
spin_lock_bh(&session->frwd_lock);
|
|
|
|
goto restart_cmd_loop;
|
|
|
|
}
|
scsi: libiscsi: Fix iscsi_task use after free()
The following bug was reported and debugged by wubo40@huawei.com:
When testing kernel 4.18 version, NULL pointer dereference problem occurs
in iscsi_eh_cmd_timed_out() function.
I think this bug in the upstream is still exists.
The analysis reasons are as follows:
1) For some reason, I/O command did not complete within the timeout
period. The block layer timer works, call scsi_times_out() to handle I/O
timeout logic. At the same time the command just completes.
2) scsi_times_out() call iscsi_eh_cmd_timed_out() to process timeout logic.
Although there is an NULL judgment for the task, the task has not been
released yet now.
3) iscsi_complete_task() calls __iscsi_put_task(). The task reference count
reaches zero, the conditions for free task is met, then
iscsi_free_task() frees the task, and sets sc->SCp.ptr = NULL. After
iscsi_eh_cmd_timed_out() passes the task judgment check, there can still
be NULL dereference scenarios.
CPU0 CPU3
|- scsi_times_out() |-
iscsi_complete_task()
| |
|- iscsi_eh_cmd_timed_out() |-
__iscsi_put_task()
| |
|- task=sc->SCp.ptr, task is not NUL, check passed |-
iscsi_free_task(task)
| |
| |-> sc->SCp.ptr
= NULL
| |
|- task is NULL now, NULL pointer dereference |
| |
\|/ \|/
Calltrace:
[380751.840862] BUG: unable to handle kernel NULL pointer dereference at
0000000000000138
[380751.843709] PGD 0 P4D 0
[380751.844770] Oops: 0000 [#1] SMP PTI
[380751.846283] CPU: 0 PID: 403 Comm: kworker/0:1H Kdump: loaded
Tainted: G
[380751.851467] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996)
[380751.856521] Workqueue: kblockd blk_mq_timeout_work
[380751.858527] RIP: 0010:iscsi_eh_cmd_timed_out+0x15e/0x2e0 [libiscsi]
[380751.861129] Code: 83 ea 01 48 8d 74 d0 08 48 8b 10 48 8b 4a 50 48 85
c9 74 2c 48 39 d5 74
[380751.868811] RSP: 0018:ffffc1e280a5fd58 EFLAGS: 00010246
[380751.870978] RAX: ffff9fd1e84e15e0 RBX: ffff9fd1e84e6dd0 RCX:
0000000116acc580
[380751.873791] RDX: ffff9fd1f97a9400 RSI: ffff9fd1e84e1800 RDI:
ffff9fd1e4d6d420
[380751.876059] RBP: ffff9fd1e4d49000 R08: 0000000116acc580 R09:
0000000116acc580
[380751.878284] R10: 0000000000000000 R11: 0000000000000000 R12:
ffff9fd1e6e931e8
[380751.880500] R13: ffff9fd1e84e6ee0 R14: 0000000000000010 R15:
0000000000000003
[380751.882687] FS: 0000000000000000(0000) GS:ffff9fd1fac00000(0000)
knlGS:0000000000000000
[380751.885236] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[380751.887059] CR2: 0000000000000138 CR3: 000000011860a001 CR4:
00000000003606f0
[380751.889308] DR0: 0000000000000000 DR1: 0000000000000000 DR2:
0000000000000000
[380751.891523] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7:
0000000000000400
[380751.893738] Call Trace:
[380751.894639] scsi_times_out+0x60/0x1c0
[380751.895861] blk_mq_check_expired+0x144/0x200
[380751.897302] ? __switch_to_asm+0x35/0x70
[380751.898551] blk_mq_queue_tag_busy_iter+0x195/0x2e0
[380751.900091] ? __blk_mq_requeue_request+0x100/0x100
[380751.901611] ? __switch_to_asm+0x41/0x70
[380751.902853] ? __blk_mq_requeue_request+0x100/0x100
[380751.904398] blk_mq_timeout_work+0x54/0x130
[380751.905740] process_one_work+0x195/0x390
[380751.907228] worker_thread+0x30/0x390
[380751.908713] ? process_one_work+0x390/0x390
[380751.910350] kthread+0x10d/0x130
[380751.911470] ? kthread_flush_work_fn+0x10/0x10
[380751.913007] ret_from_fork+0x35/0x40
crash> dis -l iscsi_eh_cmd_timed_out+0x15e
xxxxx/drivers/scsi/libiscsi.c: 2062
1970 enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
     {
     ...
1984         spin_lock_bh(&session->frwd_lock);
1985         task = (struct iscsi_task *)sc->SCp.ptr;
1986         if (!task) {
1987                 /*
1988                  * Raced with completion. Blk layer has taken ownership
1989                  * so let timeout code complete it now.
1990                  */
1991                 rc = BLK_EH_DONE;
1992                 goto done;
1993         }
     ...
2052         for (i = 0; i < conn->session->cmds_max; i++) {
2053                 running_task = conn->session->cmds[i];
2054                 if (!running_task->sc || running_task == task ||
2055                     running_task->state != ISCSI_TASK_RUNNING)
2056                         continue;
2057
2058                 /*
2059                  * Only check if cmds started before this one have made
2060                  * progress, or this could never fail
2061                  */
2062                 if (time_after(running_task->sc->jiffies_at_alloc,
2063                                task->sc->jiffies_at_alloc))   <---
2064                         continue;
2065
     ...
     }
crash> struct scsi_cmnd ffff9fd1e6e931e8
struct scsi_cmnd {
  ...
  SCp = {
    ptr = 0x0,   <--- iscsi_task
    this_residual = 0,
    ...
  },
}
To prevent this, we take a ref to the cmd under the back (completion) lock,
so if the completion side calls iscsi_complete_task() on the task while the
timer/EH paths are not holding the back_lock it will not be freed from under
us.
Note that this requires the previous patch, "scsi: libiscsi: Drop
taskqueuelock", because bnx2i sleeps in its cleanup_task callout if the cmd
is aborted. If the EH/timer and completion paths are racing we don't know
which path will do the last put. The previous patch moved the operations we
needed to do under the forward lock to cleanup_queued_task. Once that has
run we can drop the forward lock for the cmd, and bnx2i no longer has to
worry about whether the EH, timer or completion path did the last put and
whether the forward lock is held, since it won't be.
Link: https://lore.kernel.org/r/20210207044608.27585-4-michael.christie@oracle.com
Reported-by: Wu Bo <wubo40@huawei.com>
Reviewed-by: Lee Duncan <lduncan@suse.com>
Signed-off-by: Mike Christie <michael.christie@oracle.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2021-02-07 04:46:02 +00:00
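The pattern this fix relies on - pin the task with its own reference while
holding the completion (back) lock, and only dereference it afterwards - can
be illustrated outside the kernel. The following is a minimal userspace C
sketch of that idea; the names (fake_task, try_get_task, back_lock) are
invented for illustration and this is not the libiscsi implementation.

/* Illustrative userspace sketch of "take a reference under the completion
 * lock before touching the task"; not the kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_task {
	atomic_int refcount;		/* freed when this drops to zero */
	long jiffies_at_alloc;
};

static pthread_mutex_t back_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fake_task *cmd_task;	/* plays the role of sc->SCp.ptr */

/* Succeeds only while at least one reference is still held. */
static int try_get_task(struct fake_task *task)
{
	int old = atomic_load(&task->refcount);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&task->refcount, &old, old + 1))
			return 1;
	}
	return 0;	/* already on its way to being freed */
}

static void put_task(struct fake_task *task)
{
	if (atomic_fetch_sub(&task->refcount, 1) == 1)
		free(task);
}

/* Timeout/EH side: look up and pin the task under back_lock. */
static void timeout_path(void)
{
	struct fake_task *task;

	pthread_mutex_lock(&back_lock);
	task = cmd_task;
	if (!task || !try_get_task(task)) {
		pthread_mutex_unlock(&back_lock);
		printf("raced with completion, reset the timer\n");
		return;
	}
	pthread_mutex_unlock(&back_lock);

	/* Safe to dereference: we hold our own reference now. */
	printf("task allocated at %ld\n", task->jiffies_at_alloc);
	put_task(task);
}

/* Completion side: detach the task under the same lock, then drop its ref. */
static void completion_path(void)
{
	struct fake_task *task;

	pthread_mutex_lock(&back_lock);
	task = cmd_task;
	cmd_task = NULL;
	pthread_mutex_unlock(&back_lock);

	if (task)
		put_task(task);
}

int main(void)
{
	cmd_task = calloc(1, sizeof(*cmd_task));
	atomic_init(&cmd_task->refcount, 1);
	cmd_task->jiffies_at_alloc = 12345;

	timeout_path();
	completion_path();
	timeout_path();		/* now sees NULL and backs off */
	return 0;
}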
|
|
|
|
|
|
|
ISCSI_DBG_SESSION(session,
|
2009-05-13 22:57:46 +00:00
|
|
|
"failing sc %p itt 0x%x state %d\n",
|
|
|
|
task->sc, task->itt, task->state);
|
2022-06-16 22:45:55 +00:00
|
|
|
__fail_scsi_task(task, error);
|
|
|
|
__iscsi_put_task(task);
|
2007-12-13 18:43:20 +00:00
|
|
|
}
|
2021-02-07 04:46:02 +00:00
|
|
|
spin_unlock_bh(&session->back_lock);
|
2007-12-13 18:43:20 +00:00
|
|
|
}
|
|
|
|
|
2009-09-05 02:05:33 +00:00
|
|
|
/**
|
|
|
|
* iscsi_suspend_queue - suspend iscsi_queuecommand
|
|
|
|
* @conn: iscsi conn to stop queueing IO on
|
|
|
|
*
|
[SCSI] libiscsi: Reduce locking contention in fast path
Replace the session lock with two locks, a forward lock and
a backwards lock named frwd_lock and back_lock respectively.
The forward lock protects resources that change while sending a
request to the target, such as cmdsn, queued_cmdsn, and allocating
task from the commands' pool with kfifo_out.
The backward lock protects resources that change while processing
a response or in error path, such as cmdsn_exp, cmdsn_max, and
returning tasks to the commands' pool with kfifo_in.
Under a steady state fast-path situation, that is when one
or more processes/threads submit IO to an iscsi device and
a single kernel upcall (e.g softirq) is dealing with processing
of responses without errors, this patch eliminates the contention
between the queuecommand()/request response/scsi_done() flows
associated with iscsi sessions.
Between the forward and the backward locks exists a strict locking
hierarchy. The mutual exclusion zone protected by the forward lock can
enclose the mutual exclusion zone protected by the backward lock but not
vice versa.
For example, in iscsi_conn_teardown or in iscsi_xmit_data when there is
a failure and __iscsi_put_task is called, the backward lock is taken while
the forward lock is still held. On the other hand, if in the RX path a nop
is to be sent, for example in iscsi_handle_reject or __iscsi_complete_pdu,
then the backward lock is released and the forward lock is taken for the
duration of iscsi_send_nopout; later the forward lock is released and the
backward lock is retaken.
libiscsi_tcp uses two kernel fifos: the r2t pool and the r2t queue.
The insertions and deletions on these queues didn't correspond to the
assumptions made by the new forward/backward session locking paradigm.
That is, in iscsi_tcp_cleanup_task, which belongs to the RX (backward)
path, an r2t is taken out of the r2t queue and inserted into the r2t pool.
In iscsi_tcp_get_curr_r2t, which belongs to the TX (forward) path, an r2t
is also inserted into the r2t pool and another r2t is pulled from the r2t
queue.
Only in iscsi_tcp_r2t_rsp, which is called in the RX path but can requeue
to the TX path, is an r2t taken from the r2t pool and inserted into the r2t
queue.
In order to cope with this situation, two spin locks were added,
pool2queue and queue2pool. The former protects extracting from the
r2t pool and inserting into the r2t queue, and the latter protects
extracting from the r2t queue and inserting into the r2t pool.
Signed-off-by: Shlomo Pongratz <shlomop@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
[minor fix up to apply cleanly and compile fix]
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
2014-02-07 06:41:38 +00:00
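The locking hierarchy this commit describes - back_lock may be acquired
while frwd_lock is held, never the other way around, and an RX-path sender
must drop back_lock before taking frwd_lock - can be shown with a small
userspace sketch. This only illustrates the ordering rule, with pthread
mutexes standing in for the session spinlocks; it is not the libiscsi code
and the helper names are invented.

/* Illustrative sketch of the frwd_lock/back_lock nesting rule. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t frwd_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t back_lock = PTHREAD_MUTEX_INITIALIZER;

/* TX/error path: back_lock may nest inside frwd_lock. */
static void fail_task_under_frwd(void)
{
	pthread_mutex_lock(&frwd_lock);
	/* ... update cmdsn/queued_cmdsn style state ... */
	pthread_mutex_lock(&back_lock);	/* allowed: frwd -> back */
	/* ... return the task to the pool ... */
	pthread_mutex_unlock(&back_lock);
	pthread_mutex_unlock(&frwd_lock);
}

/* RX path that must send a nop: never nest frwd inside back; swap instead. */
static void rx_then_send_nop(void)
{
	pthread_mutex_lock(&back_lock);
	/* ... process the response ... */
	pthread_mutex_unlock(&back_lock);	/* drop back before taking frwd */

	pthread_mutex_lock(&frwd_lock);
	/* ... queue the nop-out ... */
	pthread_mutex_unlock(&frwd_lock);

	pthread_mutex_lock(&back_lock);	/* then retake back to finish RX work */
	pthread_mutex_unlock(&back_lock);
}

int main(void)
{
	fail_task_under_frwd();
	rx_then_send_nop();
	puts("lock ordering exercised without deadlock");
	return 0;
}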
|
|
|
* This grabs the session frwd_lock to make sure no one is in
|
2009-09-05 02:05:33 +00:00
|
|
|
* xmit_task/queuecommand, and then sets suspend to prevent
|
|
|
|
* new commands from being queued. This only needs to be called
|
|
|
|
* by offload drivers that need to sync a path like ep disconnect
|
|
|
|
* with the iscsi_queuecommand/xmit_task. To start IO again libiscsi
|
|
|
|
* will call iscsi_start_tx and iscsi_unblock_session when in FFP.
|
|
|
|
*/
|
|
|
|
void iscsi_suspend_queue(struct iscsi_conn *conn)
|
|
|
|
{
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&conn->session->frwd_lock);
|
2022-04-08 00:13:11 +00:00
|
|
|
set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&conn->session->frwd_lock);
|
2009-09-05 02:05:33 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
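The suspend semantics documented above boil down to: grab the lock that
submitters hold, set the suspend flag, and release it, so any submitter that
runs afterwards sees the flag and refuses new IO. Below is a minimal
userspace sketch of that pattern with invented names; it is only an analogy
for the frwd_lock/ISCSI_CONN_FLAG_SUSPEND_TX handshake, not the kernel code.

/* Illustrative sketch: suspend flag set under the submitters' lock. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t frwd_lock = PTHREAD_MUTEX_INITIALIZER;
static bool suspend_tx;

static int queue_io(int id)
{
	int queued = 0;

	pthread_mutex_lock(&frwd_lock);
	if (!suspend_tx) {
		printf("queued io %d\n", id);
		queued = 1;
	}
	pthread_mutex_unlock(&frwd_lock);
	return queued;
}

static void suspend_queue(void)
{
	pthread_mutex_lock(&frwd_lock);	/* waits for any in-flight submitter */
	suspend_tx = true;
	pthread_mutex_unlock(&frwd_lock);
}

int main(void)
{
	queue_io(1);		/* accepted */
	suspend_queue();
	if (!queue_io(2))	/* rejected: queue is suspended */
		puts("io 2 rejected after suspend");
	return 0;
}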
|
|
|
|
|
|
|
|
/**
|
|
|
|
* iscsi_suspend_tx - suspend iscsi_data_xmit
|
2022-06-16 22:45:50 +00:00
|
|
|
* @conn: iscsi conn to stop processing IO on.
|
2009-09-05 02:05:33 +00:00
|
|
|
*
|
|
|
|
* This function sets the suspend bit to prevent iscsi_data_xmit
|
|
|
|
* from sending new IO, and if work is queued on the xmit thread
|
|
|
|
* it will wait for it to be completed.
|
|
|
|
*/
|
2008-05-21 20:54:03 +00:00
|
|
|
void iscsi_suspend_tx(struct iscsi_conn *conn)
|
2007-08-15 06:38:30 +00:00
|
|
|
{
|
2009-03-05 20:46:03 +00:00
|
|
|
struct Scsi_Host *shost = conn->session->host;
|
|
|
|
struct iscsi_host *ihost = shost_priv(shost);
|
|
|
|
|
2022-04-08 00:13:11 +00:00
|
|
|
set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
|
2009-05-13 22:57:48 +00:00
|
|
|
if (ihost->workq)
|
2022-06-16 22:45:50 +00:00
|
|
|
flush_work(&conn->xmitwork);
|
2007-08-15 06:38:30 +00:00
|
|
|
}
|
2008-05-21 20:54:03 +00:00
|
|
|
EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
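iscsi_suspend_tx pairs the suspend flag with a flush of the xmit work so
that, on return, no transmission is still in flight. A loose userspace
analogy - an atomic flag plus joining the worker thread standing in for
flush_work() - is sketched below; names are invented and this is not the
kernel implementation.

/* Illustrative sketch of "set the suspend flag, then wait for the worker". */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool suspend_tx;
static pthread_t xmit_worker;

static void *xmit_work(void *arg)
{
	(void)arg;
	/* Each pass checks the flag before sending more IO. */
	while (!atomic_load(&suspend_tx))
		; /* ... transmit queued PDUs ... */
	return NULL;
}

static void suspend_tx_path(void)
{
	atomic_store(&suspend_tx, true);	/* stop new transmissions */
	pthread_join(xmit_worker, NULL);	/* wait for in-flight work, like flush_work() */
}

int main(void)
{
	pthread_create(&xmit_worker, NULL, xmit_work, NULL);
	suspend_tx_path();
	puts("xmit worker quiesced");
	return 0;
}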
|
2007-08-15 06:38:30 +00:00
|
|
|
|
|
|
|
static void iscsi_start_tx(struct iscsi_conn *conn)
|
|
|
|
{
|
2022-04-08 00:13:11 +00:00
|
|
|
clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
|
2022-06-16 22:45:49 +00:00
|
|
|
iscsi_conn_queue_xmit(conn);
|
2007-08-15 06:38:30 +00:00
|
|
|
}
|
|
|
|
|
2022-06-16 22:45:50 +00:00
|
|
|
/**
|
|
|
|
* iscsi_suspend_rx - Prevent recvwork from running again.
|
|
|
|
* @conn: iscsi conn to stop.
|
|
|
|
*/
|
|
|
|
void iscsi_suspend_rx(struct iscsi_conn *conn)
|
|
|
|
{
|
|
|
|
struct Scsi_Host *shost = conn->session->host;
|
|
|
|
struct iscsi_host *ihost = shost_priv(shost);
|
|
|
|
|
|
|
|
set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
|
|
|
|
if (ihost->workq)
|
|
|
|
flush_work(&conn->recvwork);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_suspend_rx);
|
|
|
|
|
2009-05-13 22:57:45 +00:00
|
|
|
/*
|
|
|
|
* We want to make sure a ping is in flight. It has timed out.
|
|
|
|
* And we are not busy processing a pdu that is making
|
|
|
|
* progress but got started before the ping and is taking a while
|
|
|
|
* to complete so the ping is just stuck behind it in a queue.
|
|
|
|
*/
|
|
|
|
static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
|
|
|
|
{
|
2020-11-06 19:33:17 +00:00
|
|
|
if (READ_ONCE(conn->ping_task) &&
|
2009-05-13 22:57:45 +00:00
|
|
|
time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
|
|
|
|
(conn->ping_timeout * HZ), jiffies))
|
|
|
|
return 1;
|
|
|
|
else
|
|
|
|
return 0;
|
|
|
|
}
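The check above is pure jiffies arithmetic: the ping has timed out once
last_recv plus the receive and ping timeouts (scaled by HZ) is at or before
the current time. A small standalone C example of that deadline computation,
using the same wraparound-safe comparison trick as time_before_eq() and an
assumed HZ value, is shown below for illustration only.

/* Illustrative deadline check mirroring iscsi_has_ping_timed_out(). */
#include <stdio.h>

#define HZ 250UL	/* assumed tick rate, for illustration only */

static int deadline_passed(unsigned long deadline, unsigned long now)
{
	/* same trick as the kernel's time_before_eq(): signed difference */
	return (long)(deadline - now) <= 0;
}

int main(void)
{
	unsigned long last_recv = 1000, recv_timeout = 5, ping_timeout = 5;
	unsigned long deadline = last_recv + (recv_timeout + ping_timeout) * HZ;

	printf("timed out at jiffies=3500? %d\n", deadline_passed(deadline, 3500));
	printf("timed out at jiffies=3499? %d\n", deadline_passed(deadline, 3499));
	return 0;
}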
|
|
|
|
|
2022-10-18 20:29:50 +00:00
|
|
|
enum scsi_timeout_action iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
|
2007-12-13 18:43:30 +00:00
|
|
|
{
|
2022-10-18 20:29:50 +00:00
|
|
|
enum scsi_timeout_action rc = SCSI_EH_NOT_HANDLED;
|
2010-02-10 22:51:45 +00:00
|
|
|
struct iscsi_task *task = NULL, *running_task;
|
2007-12-13 18:43:30 +00:00
|
|
|
struct iscsi_cls_session *cls_session;
|
|
|
|
struct iscsi_session *session;
|
|
|
|
struct iscsi_conn *conn;
|
2010-02-10 22:51:45 +00:00
|
|
|
int i;
|
2007-12-13 18:43:30 +00:00
|
|
|
|
2009-06-16 03:11:08 +00:00
|
|
|
cls_session = starget_to_session(scsi_target(sc->device));
|
2008-05-21 20:53:59 +00:00
|
|
|
session = cls_session->dd_data;
|
2007-12-13 18:43:30 +00:00
|
|
|
|
2009-06-16 03:11:10 +00:00
|
|
|
ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
|
2007-12-13 18:43:30 +00:00
|
|
|
|
2019-12-09 17:34:57 +00:00
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2021-02-07 04:46:02 +00:00
|
|
|
spin_lock(&session->back_lock);
|
2022-02-18 19:50:53 +00:00
|
|
|
task = iscsi_cmd(sc)->task;
|
2012-01-27 03:13:11 +00:00
|
|
|
if (!task) {
|
|
|
|
/*
|
|
|
|
* Raced with completion. Blk layer has taken ownership
|
|
|
|
* so let timeout code complete it now.
|
|
|
|
*/
|
2022-10-18 20:29:50 +00:00
|
|
|
rc = SCSI_EH_NOT_HANDLED;
|
2021-02-07 04:46:02 +00:00
|
|
|
spin_unlock(&session->back_lock);
|
2012-01-27 03:13:11 +00:00
|
|
|
goto done;
|
|
|
|
}
|
2022-06-16 22:45:55 +00:00
|
|
|
if (!iscsi_get_task(task)) {
|
|
|
|
/*
|
|
|
|
* Racing with the completion path right now, so give it more
|
|
|
|
* time so that path can complete it like normal.
|
|
|
|
*/
|
2022-10-18 20:29:50 +00:00
|
|
|
rc = SCSI_EH_RESET_TIMER;
|
2022-06-16 22:45:55 +00:00
|
|
|
task = NULL;
|
|
|
|
spin_unlock(&session->back_lock);
|
|
|
|
goto done;
|
|
|
|
}
|
2021-02-07 04:46:02 +00:00
|
|
|
spin_unlock(&session->back_lock);
|
2012-01-27 03:13:11 +00:00
|
|
|
|
2007-12-13 18:43:30 +00:00
|
|
|
if (session->state != ISCSI_STATE_LOGGED_IN) {
|
scsi: libiscsi: Allow sd_shutdown on bad transport
If, for any reason, userland shuts down iscsi transport interfaces
before proper logouts - like when logging in to LUNs manually, without
logging out on server shutdown, or when automated scripts can't
umount/logout from logged-in LUNs - the kernel will hang forever in its
sd_sync_cache() logic, after issuing the SYNCHRONIZE_CACHE cmd to all
still-existing paths.
PID: 1 TASK: ffff8801a69b8000 CPU: 1 COMMAND: "systemd-shutdow"
#0 [ffff8801a69c3a30] __schedule at ffffffff8183e9ee
#1 [ffff8801a69c3a80] schedule at ffffffff8183f0d5
#2 [ffff8801a69c3a98] schedule_timeout at ffffffff81842199
#3 [ffff8801a69c3b40] io_schedule_timeout at ffffffff8183e604
#4 [ffff8801a69c3b70] wait_for_completion_io_timeout at ffffffff8183fc6c
#5 [ffff8801a69c3bd0] blk_execute_rq at ffffffff813cfe10
#6 [ffff8801a69c3c88] scsi_execute at ffffffff815c3fc7
#7 [ffff8801a69c3cc8] scsi_execute_req_flags at ffffffff815c60fe
#8 [ffff8801a69c3d30] sd_sync_cache at ffffffff815d37d7
#9 [ffff8801a69c3da8] sd_shutdown at ffffffff815d3c3c
This happens because iscsi_eh_cmd_timed_out(), the transport layer
timeout helper, would tell the queue timeout function (scsi_times_out)
to reset the request timer over and over, until the session state is
back to the logged-in state. Unfortunately, during server shutdown, this
might never happen again.
Another option would be "not to handle" the issue in the transport
layer. That would trigger the error handler logic, which would also need
the session state to be logged in again.
The best option, for such a case, is to tell upper layers that the command
was handled during the transport layer error handler helper, marking it as
DID_NO_CONNECT, which will allow completion and inform about the
problem.
After the session was marked ISCSI_STATE_FAILED, due to the first
timeout during the server shutdown phase, all subsequent cmds will fail
to be queued, allowing the upper logic to fail faster.
Signed-off-by: Rafael David Tinoco <rafael.tinoco@canonical.com>
Reviewed-by: Lee Duncan <lduncan@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2017-12-07 21:59:13 +00:00
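Put differently, the shutdown handling added by this patch is a small
decision tree at the top of the timeout handler. The sketch below is a
simplified, standalone rendering of that branch; the enum, struct and
function names are invented for illustration, and only DID_NO_CONNECT and
the shift into the host byte mirror the real code.

/* Illustrative decision sketch, not the kernel timeout handler. */
#include <stdbool.h>
#include <stdio.h>

enum timeout_action { EH_NOT_HANDLED, EH_RESET_TIMER };

struct cmd { int result; };

#define DID_NO_CONNECT 0x01	/* SCSI host byte for "no connection" */

static enum timeout_action decide(struct cmd *sc, bool logged_in, bool shutting_down)
{
	if (!logged_in) {
		if (shutting_down) {
			/* Recovery will never happen: complete the command. */
			sc->result = DID_NO_CONNECT << 16;
			return EH_NOT_HANDLED;
		}
		/* Recovery is in progress: give it more time. */
		return EH_RESET_TIMER;
	}
	return EH_RESET_TIMER;	/* placeholder for the progress checks below */
}

int main(void)
{
	struct cmd sc = { 0 };

	printf("action=%d result=0x%x\n", decide(&sc, false, true), sc.result);
	return 0;
}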
|
|
|
/*
|
|
|
|
* During shutdown, if session is prematurely disconnected,
|
|
|
|
* recovery won't happen and there will be hung cmds. Not
|
|
|
|
* handling cmds would trigger EH, also bad in this case.
|
|
|
|
* Instead, handle cmd, allow completion to happen and let
|
|
|
|
* upper layer to deal with the result.
|
|
|
|
*/
|
|
|
|
if (unlikely(system_state != SYSTEM_RUNNING)) {
|
|
|
|
sc->result = DID_NO_CONNECT << 16;
|
|
|
|
ISCSI_DBG_EH(session, "sc on shutdown, handled\n");
|
2022-10-18 20:29:50 +00:00
|
|
|
rc = SCSI_EH_NOT_HANDLED;
|
2017-12-07 21:59:13 +00:00
|
|
|
goto done;
|
|
|
|
}
|
2007-12-13 18:43:30 +00:00
|
|
|
/*
|
|
|
|
* We are probably in the middle of iscsi recovery so let
|
|
|
|
* that complete and handle the error.
|
|
|
|
*/
|
2022-10-18 20:29:50 +00:00
|
|
|
rc = SCSI_EH_RESET_TIMER;
|
2007-12-13 18:43:30 +00:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
|
|
|
conn = session->leadconn;
|
|
|
|
if (!conn) {
|
|
|
|
/* In the middle of shutting down */
|
2022-10-18 20:29:50 +00:00
|
|
|
rc = SCSI_EH_RESET_TIMER;
|
2007-12-13 18:43:30 +00:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2009-06-16 03:11:08 +00:00
|
|
|
/*
|
|
|
|
* If we have sent (at least queued to the network layer) a pdu or
|
|
|
|
* recvd one for the task since the last timeout ask for
|
|
|
|
* more time. If on the next timeout we have not made progress
|
|
|
|
* we can check if it is the task or connection when we send the
|
|
|
|
* nop as a ping.
|
|
|
|
*/
|
2010-02-10 22:51:45 +00:00
|
|
|
if (time_after(task->last_xfer, task->last_timeout)) {
|
2009-06-16 03:11:10 +00:00
|
|
|
ISCSI_DBG_EH(session, "Command making progress. Asking "
|
|
|
|
"scsi-ml for more time to complete. "
|
2010-02-10 22:51:45 +00:00
|
|
|
"Last data xfer at %lu. Last timeout was at "
|
2009-06-16 03:11:10 +00:00
|
|
|
"%lu\n.", task->last_xfer, task->last_timeout);
|
2009-06-16 03:11:08 +00:00
|
|
|
task->have_checked_conn = false;
|
2022-10-18 20:29:50 +00:00
|
|
|
rc = SCSI_EH_RESET_TIMER;
|
2009-06-16 03:11:08 +00:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2007-12-13 18:43:30 +00:00
|
|
|
if (!conn->recv_timeout && !conn->ping_timeout)
|
|
|
|
goto done;
|
|
|
|
/*
|
|
|
|
* if the ping timedout then we are in the middle of cleaning up
|
|
|
|
* and can let the iscsi eh handle it
|
|
|
|
*/
|
2009-05-13 22:57:45 +00:00
|
|
|
if (iscsi_has_ping_timed_out(conn)) {
|
2022-10-18 20:29:50 +00:00
|
|
|
rc = SCSI_EH_RESET_TIMER;
|
2009-05-13 22:57:45 +00:00
|
|
|
goto done;
|
|
|
|
}
|
2009-06-16 03:11:08 +00:00
|
|
|
|
2021-02-07 04:46:02 +00:00
|
|
|
spin_lock(&session->back_lock);
|
2010-02-10 22:51:45 +00:00
|
|
|
for (i = 0; i < conn->session->cmds_max; i++) {
|
|
|
|
running_task = conn->session->cmds[i];
|
|
|
|
if (!running_task->sc || running_task == task ||
|
|
|
|
running_task->state != ISCSI_TASK_RUNNING)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Only check if cmds started before this one have made
|
|
|
|
* progress, or this could never fail
|
|
|
|
*/
|
|
|
|
if (time_after(running_task->sc->jiffies_at_alloc,
|
|
|
|
task->sc->jiffies_at_alloc))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (time_after(running_task->last_xfer, task->last_timeout)) {
|
|
|
|
/*
|
|
|
|
* This task has not made progress, but a task
|
|
|
|
* started before us has transferred data since
|
|
|
|
* we started/last-checked. We could be queueing
|
|
|
|
* too many tasks or the LU is bad.
|
|
|
|
*
|
|
|
|
* If the device is bad the cmds ahead of us on
|
|
|
|
* other devs will complete, and this loop will
|
|
|
|
* eventually fail starting the scsi eh.
|
|
|
|
*/
|
|
|
|
ISCSI_DBG_EH(session, "Command has not made progress "
|
|
|
|
"but commands ahead of it have. "
|
|
|
|
"Asking scsi-ml for more time to "
|
|
|
|
"complete. Our last xfer vs running task "
|
|
|
|
"last xfer %lu/%lu. Last check %lu.\n",
|
|
|
|
task->last_xfer, running_task->last_xfer,
|
|
|
|
task->last_timeout);
|
2021-02-07 04:46:02 +00:00
|
|
|
spin_unlock(&session->back_lock);
|
2022-10-18 20:29:50 +00:00
|
|
|
rc = SCSI_EH_RESET_TIMER;
|
2010-02-10 22:51:45 +00:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
}
|
2021-02-07 04:46:02 +00:00
|
|
|
spin_unlock(&session->back_lock);
|
2010-02-10 22:51:45 +00:00
|
|
|
|
2009-06-16 03:11:08 +00:00
|
|
|
/* Assumes nop timeout is shorter than scsi cmd timeout */
|
|
|
|
if (task->have_checked_conn)
|
|
|
|
goto done;
|
|
|
|
|
2007-12-13 18:43:30 +00:00
|
|
|
/*
|
2009-06-16 03:11:08 +00:00
|
|
|
* Checking the transport already or nop from a cmd timeout still
|
|
|
|
* running
|
2007-12-13 18:43:30 +00:00
|
|
|
*/
|
2020-11-06 19:33:17 +00:00
|
|
|
if (READ_ONCE(conn->ping_task)) {
|
2009-06-16 03:11:08 +00:00
|
|
|
task->have_checked_conn = true;
|
2022-10-18 20:29:50 +00:00
|
|
|
rc = SCSI_EH_RESET_TIMER;
|
2009-05-13 22:57:45 +00:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2009-06-16 03:11:08 +00:00
|
|
|
/* Make sure there is a transport check done */
|
|
|
|
iscsi_send_nopout(conn, NULL);
|
|
|
|
task->have_checked_conn = true;
|
2022-10-18 20:29:50 +00:00
|
|
|
rc = SCSI_EH_RESET_TIMER;
|
2009-06-16 03:11:08 +00:00
|
|
|
|
2007-12-13 18:43:30 +00:00
|
|
|
done:
|
2019-12-09 17:34:57 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2021-02-07 04:46:02 +00:00
|
|
|
|
|
|
|
if (task) {
|
|
|
|
task->last_timeout = jiffies;
|
|
|
|
iscsi_put_task(task);
|
|
|
|
}
|
2022-10-18 20:29:50 +00:00
|
|
|
ISCSI_DBG_EH(session, "return %s\n", rc == SCSI_EH_RESET_TIMER ?
|
scsi: libiscsi: Allow sd_shutdown on bad transport
If, for any reason, userland shuts down the iscsi transport interfaces
before proper logouts - for example when LUNs were logged in manually and
never logged out on server shutdown, or when automated scripts cannot
umount/logout from logged-in LUNs - the kernel will hang forever in its
sd_sync_cache() logic after issuing the SYNCHRONIZE_CACHE cmd to all
still-existing paths.
PID: 1 TASK: ffff8801a69b8000 CPU: 1 COMMAND: "systemd-shutdow"
#0 [ffff8801a69c3a30] __schedule at ffffffff8183e9ee
#1 [ffff8801a69c3a80] schedule at ffffffff8183f0d5
#2 [ffff8801a69c3a98] schedule_timeout at ffffffff81842199
#3 [ffff8801a69c3b40] io_schedule_timeout at ffffffff8183e604
#4 [ffff8801a69c3b70] wait_for_completion_io_timeout at ffffffff8183fc6c
#5 [ffff8801a69c3bd0] blk_execute_rq at ffffffff813cfe10
#6 [ffff8801a69c3c88] scsi_execute at ffffffff815c3fc7
#7 [ffff8801a69c3cc8] scsi_execute_req_flags at ffffffff815c60fe
#8 [ffff8801a69c3d30] sd_sync_cache at ffffffff815d37d7
#9 [ffff8801a69c3da8] sd_shutdown at ffffffff815d3c3c
This happens because iscsi_eh_cmd_timed_out(), the transport layer
timeout helper, keeps telling the queue timeout function (scsi_times_out)
to reset the request timer over and over until the session is back in the
logged-in state. Unfortunately, during server shutdown, that might never
happen again.
Another option would be not to handle the issue in the transport layer at
all. That would trigger the error handler logic, which also needs the
session to reach the logged-in state again.
The best option for this case is to tell the upper layers that the command
was handled in the transport layer's error handler helper, marking it as
DID_NO_CONNECT, which allows completion and reports the problem.
After the session has been marked ISCSI_STATE_FAILED, due to the first
timeout during the server shutdown phase, all subsequent cmds will fail to
be queued, allowing the upper logic to fail faster.
Signed-off-by: Rafael David Tinoco <rafael.tinoco@canonical.com>
Reviewed-by: Lee Duncan <lduncan@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
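The decision described above can be sketched as follows (illustrative only,
not the upstream function: the demo_* names are invented and 0x10000 merely
stands in for DID_NO_CONNECT << 16):

/*
 * Once the session is known to be failed or terminating, complete the
 * timed-out command with a connect error instead of rearming the block
 * layer timer forever.
 */
enum demo_eh_ret { DEMO_EH_DONE, DEMO_EH_RESET_TIMER };
enum demo_session_state { DEMO_LOGGED_IN, DEMO_FAILED, DEMO_TERMINATE };

static enum demo_eh_ret demo_cmd_timed_out(enum demo_session_state state,
					   int *scsi_result)
{
	if (state != DEMO_LOGGED_IN) {
		*scsi_result = 0x10000;  /* stands in for DID_NO_CONNECT << 16 */
		return DEMO_EH_DONE;     /* complete now, do not reset the timer */
	}
	return DEMO_EH_RESET_TIMER;      /* transport may still recover */
}

The first timeout after the transport is gone thus fails the command up the
stack, and, as the message notes, later commands are rejected at queueing
time once the session is in ISCSI_STATE_FAILED, so the upper layers fail
fast instead of hanging in sd_sync_cache().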
2017-12-07 21:59:13 +00:00
|
|
|
"timer reset" : "shutdown or nh");
|
2007-12-13 18:43:30 +00:00
|
|
|
return rc;
|
|
|
|
}
|
2017-01-30 12:18:58 +00:00
|
|
|
EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out);
|
2007-12-13 18:43:30 +00:00
|
|
|
|
2017-10-11 23:25:40 +00:00
|
|
|
static void iscsi_check_transport_timeouts(struct timer_list *t)
|
2007-12-13 18:43:30 +00:00
|
|
|
{
|
2017-10-11 23:25:40 +00:00
|
|
|
struct iscsi_conn *conn = from_timer(conn, t, transport_timer);
|
2007-12-13 18:43:30 +00:00
|
|
|
struct iscsi_session *session = conn->session;
|
2008-05-08 01:43:52 +00:00
|
|
|
unsigned long recv_timeout, next_timeout = 0, last_recv;
|
2007-12-13 18:43:30 +00:00
|
|
|
|
[SCSI] libiscsi: Reduce locking contention in fast path
Replace the session lock with two locks, a forward lock and a backward
lock, named frwd_lock and back_lock respectively.
The forward lock protects resources that change while sending a request
to the target, such as cmdsn, queued_cmdsn, and allocating a task from
the commands' pool with kfifo_out.
The backward lock protects resources that change while processing a
response or in the error path, such as cmdsn_exp, cmdsn_max, and
returning tasks to the commands' pool with kfifo_in.
Under a steady-state fast-path situation, that is when one or more
processes/threads submit IO to an iscsi device and a single kernel
upcall (e.g. softirq) handles response processing without errors, this
patch eliminates the contention between the queuecommand()/request
response/scsi_done() flows associated with iscsi sessions.
Between the forward and the backward locks exists a strict locking
hierarchy: the mutual exclusion zone protected by the forward lock can
enclose the mutual exclusion zone protected by the backward lock, but
not vice versa.
For example, in iscsi_conn_teardown, or in iscsi_xmit_data when there is
a failure and __iscsi_put_task is called, the backward lock is taken
while the forward lock is still held. On the other hand, if in the RX
path a nop is to be sent, for example in iscsi_handle_reject or
__iscsi_complete_pdu, then the forward lock is released and the backward
lock is taken for the duration of iscsi_send_nopout; afterwards the
backward lock is released and the forward lock is retaken.
libiscsi_tcp uses two kernel fifos, the r2t pool and the r2t queue.
Insertion into and deletion from these fifos did not match the
assumptions of the new forward/backward session locking paradigm.
That is, in iscsi_tcp_cleanup_task, which belongs to the RX (backward)
path, an r2t is taken out of the r2t queue and inserted into the r2t
pool. In iscsi_tcp_get_curr_r2t, which belongs to the TX (forward) path,
an r2t is also inserted into the r2t pool and another r2t is pulled from
the r2t queue. Only in iscsi_tcp_r2t_rsp, which is called in the RX path
but can requeue to the TX path, is an r2t taken from the r2t pool and
inserted into the r2t queue.
To cope with this, two spin locks were added, pool2queue and queue2pool.
The former protects extracting from the r2t pool and inserting into the
r2t queue, and the latter protects extracting from the r2t queue and
inserting into the r2t pool.
Signed-off-by: Shlomo Pongratz <shlomop@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
[minor fix up to apply cleanly and compile fix]
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
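A toy user-space model of the two-lock scheme and its lock hierarchy (a
sketch of the description above; the demo_* names are invented and pthread
mutexes stand in for the kernel spinlocks):

#include <pthread.h>
#include <stdio.h>

struct demo_session {
	pthread_mutex_t frwd_lock;   /* TX side: cmdsn, queued_cmdsn, task alloc */
	pthread_mutex_t back_lock;   /* RX side: cmdsn_exp/max, task free        */
	unsigned int cmdsn, exp_cmdsn;
};

/* Submission path: forward lock only. */
static void demo_queue_cmd(struct demo_session *s)
{
	pthread_mutex_lock(&s->frwd_lock);
	s->cmdsn++;
	pthread_mutex_unlock(&s->frwd_lock);
}

/* Completion path: back lock only, so it does not contend with submission. */
static void demo_complete_pdu(struct demo_session *s)
{
	pthread_mutex_lock(&s->back_lock);
	s->exp_cmdsn++;
	pthread_mutex_unlock(&s->back_lock);
}

/* Error path: the back lock may nest inside the forward lock, never the
 * other way around. */
static void demo_fail_task(struct demo_session *s)
{
	pthread_mutex_lock(&s->frwd_lock);
	pthread_mutex_lock(&s->back_lock);
	s->exp_cmdsn = s->cmdsn;
	pthread_mutex_unlock(&s->back_lock);
	pthread_mutex_unlock(&s->frwd_lock);
}

int main(void)
{
	struct demo_session s = { .cmdsn = 0, .exp_cmdsn = 0 };

	pthread_mutex_init(&s.frwd_lock, NULL);
	pthread_mutex_init(&s.back_lock, NULL);
	demo_queue_cmd(&s);
	demo_complete_pdu(&s);
	demo_fail_task(&s);
	printf("cmdsn=%u exp_cmdsn=%u\n", s.cmdsn, s.exp_cmdsn);
	return 0;
}

Because submission and completion touch disjoint state under different
locks, the steady-state fast path no longer serializes on a single session
lock; only error and teardown paths nest the back lock inside the forward
lock, which is the hierarchy the message spells out.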
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock(&session->frwd_lock);
|
2007-12-13 18:43:30 +00:00
|
|
|
if (session->state != ISCSI_STATE_LOGGED_IN)
|
|
|
|
goto done;
|
|
|
|
|
2008-05-08 01:43:52 +00:00
|
|
|
recv_timeout = conn->recv_timeout;
|
|
|
|
if (!recv_timeout)
|
2007-12-13 18:43:30 +00:00
|
|
|
goto done;
|
|
|
|
|
2008-05-08 01:43:52 +00:00
|
|
|
recv_timeout *= HZ;
|
2007-12-13 18:43:30 +00:00
|
|
|
last_recv = conn->last_recv;
|
2009-05-13 22:57:45 +00:00
|
|
|
|
|
|
|
if (iscsi_has_ping_timed_out(conn)) {
|
2008-01-31 19:36:52 +00:00
|
|
|
iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
|
2009-05-13 22:57:45 +00:00
|
|
|
"expired, recv timeout %d, last rx %lu, "
|
|
|
|
"last ping %lu, now %lu\n",
|
|
|
|
conn->ping_timeout, conn->recv_timeout,
|
|
|
|
last_recv, conn->last_ping, jiffies);
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock(&session->frwd_lock);
|
2014-07-12 20:51:51 +00:00
|
|
|
iscsi_conn_failure(conn, ISCSI_ERR_NOP_TIMEDOUT);
|
2007-12-13 18:43:30 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2008-05-08 01:43:52 +00:00
|
|
|
if (time_before_eq(last_recv + recv_timeout, jiffies)) {
|
2008-05-09 01:15:34 +00:00
|
|
|
/* send a ping to try to provoke some traffic */
|
2009-03-05 20:45:58 +00:00
|
|
|
ISCSI_DBG_CONN(conn, "Sending nopout as ping\n");
|
2015-09-03 16:49:55 +00:00
|
|
|
if (iscsi_send_nopout(conn, NULL))
|
|
|
|
next_timeout = jiffies + (1 * HZ);
|
|
|
|
else
|
|
|
|
next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
|
2008-01-31 19:36:50 +00:00
|
|
|
} else
|
2008-05-08 01:43:52 +00:00
|
|
|
next_timeout = last_recv + recv_timeout;
|
2007-12-13 18:43:30 +00:00
|
|
|
|
2009-03-05 20:45:58 +00:00
|
|
|
ISCSI_DBG_CONN(conn, "Setting next tmo %lu\n", next_timeout);
|
2008-01-31 19:36:50 +00:00
|
|
|
mod_timer(&conn->transport_timer, next_timeout);
|
2007-12-13 18:43:30 +00:00
|
|
|
done:
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock(&session->frwd_lock);
|
2007-12-13 18:43:30 +00:00
|
|
|
}
|
|
|
|
|
2021-05-25 18:17:55 +00:00
|
|
|
/**
|
|
|
|
* iscsi_conn_unbind - prevent queueing to conn.
|
|
|
|
* @cls_conn: iscsi conn ep is bound to.
|
|
|
|
* @is_active: is the conn in use for boot or is this for EH/termination
|
|
|
|
*
|
|
|
|
* This must be called by drivers implementing the ep_disconnect callout.
|
|
|
|
* It disables queueing to the connection from libiscsi in preparation for
|
|
|
|
* an ep_disconnect call.
|
|
|
|
*/
|
|
|
|
void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active)
|
|
|
|
{
|
|
|
|
struct iscsi_session *session;
|
|
|
|
struct iscsi_conn *conn;
|
|
|
|
|
|
|
|
if (!cls_conn)
|
|
|
|
return;
|
|
|
|
|
|
|
|
conn = cls_conn->dd_data;
|
|
|
|
session = conn->session;
|
|
|
|
/*
|
|
|
|
* Wait for iscsi_eh calls to exit. We don't wait for the tmf to
|
|
|
|
* complete or timeout. The caller just wants to know what's running
|
|
|
|
* is everything that needs to be cleaned up, and no cmds will be
|
|
|
|
* queued.
|
|
|
|
*/
|
|
|
|
mutex_lock(&session->eh_mutex);
|
|
|
|
|
|
|
|
iscsi_suspend_queue(conn);
|
|
|
|
iscsi_suspend_tx(conn);
|
|
|
|
|
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2022-04-08 00:13:12 +00:00
|
|
|
clear_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
|
|
|
|
|
2021-05-25 18:17:55 +00:00
|
|
|
if (!is_active) {
|
|
|
|
/*
|
|
|
|
* if logout timed out before userspace could even send a PDU
|
|
|
|
* the state might still be in ISCSI_STATE_LOGGED_IN and
|
|
|
|
* allowing new cmds and TMFs.
|
|
|
|
*/
|
|
|
|
if (session->state == ISCSI_STATE_LOGGED_IN)
|
|
|
|
iscsi_set_conn_failed(conn);
|
|
|
|
}
|
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
|
|
|
mutex_unlock(&session->eh_mutex);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_conn_unbind);
|
|
|
|
|
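For reference, a hypothetical driver's ep_disconnect callout might call
iscsi_conn_unbind() roughly like this (a sketch only: struct my_endpoint,
its fields and my_ep_disconnect are invented; only iscsi_conn_unbind() and
the iscsi_endpoint dd_data plumbing come from the code above and the
transport class):

#include <scsi/scsi_transport_iscsi.h>

/* Hypothetical LLD endpoint bookkeeping (illustrative only). */
struct my_endpoint {
	struct iscsi_cls_conn *cls_conn;   /* conn currently bound to this ep */
	bool in_boot;                      /* ep in use for a boot session?   */
};

static void my_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct my_endpoint *my_ep = ep->dd_data;

	/* Stop libiscsi from queueing cmds/PDUs to the conn before teardown. */
	iscsi_conn_unbind(my_ep->cls_conn, my_ep->in_boot);

	/* ...driver-specific teardown of the transport endpoint follows... */
}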
2008-05-21 20:54:09 +00:00
|
|
|
static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
|
2007-12-13 18:43:20 +00:00
|
|
|
struct iscsi_tm *hdr)
|
|
|
|
{
|
|
|
|
memset(hdr, 0, sizeof(*hdr));
|
|
|
|
hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
|
|
|
|
hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
|
|
|
|
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
|
2011-06-16 22:57:09 +00:00
|
|
|
hdr->lun = task->lun;
|
2008-12-02 06:32:05 +00:00
|
|
|
hdr->rtt = task->hdr_itt;
|
|
|
|
hdr->refcmdsn = task->cmdsn;
|
2007-12-13 18:43:20 +00:00
|
|
|
}
|
|
|
|
|
2006-04-07 02:13:41 +00:00
|
|
|
int iscsi_eh_abort(struct scsi_cmnd *sc)
|
|
|
|
{
|
2008-05-21 20:53:59 +00:00
|
|
|
struct iscsi_cls_session *cls_session;
|
|
|
|
struct iscsi_session *session;
|
2006-08-31 22:09:33 +00:00
|
|
|
struct iscsi_conn *conn;
|
2008-05-21 20:54:09 +00:00
|
|
|
struct iscsi_task *task;
|
2007-12-13 18:43:20 +00:00
|
|
|
struct iscsi_tm *hdr;
|
2016-03-30 18:27:08 +00:00
|
|
|
int age;
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2008-05-21 20:53:59 +00:00
|
|
|
cls_session = starget_to_session(scsi_target(sc->device));
|
|
|
|
session = cls_session->dd_data;
|
|
|
|
|
2009-06-16 03:11:10 +00:00
|
|
|
ISCSI_DBG_EH(session, "aborting sc %p\n", sc);
|
2009-05-13 22:57:50 +00:00
|
|
|
|
2022-06-16 22:45:55 +00:00
|
|
|
completion_check:
|
2007-08-15 06:38:30 +00:00
|
|
|
mutex_lock(&session->eh_mutex);
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2006-08-31 22:09:33 +00:00
|
|
|
/*
|
|
|
|
* if session was ISCSI_STATE_IN_RECOVERY then we may not have
|
|
|
|
* got the command.
|
|
|
|
*/
|
2022-02-18 19:50:53 +00:00
|
|
|
if (!iscsi_cmd(sc)->task) {
|
2009-06-16 03:11:10 +00:00
|
|
|
ISCSI_DBG_EH(session, "sc never reached iscsi layer or "
|
|
|
|
"it completed.\n");
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2007-08-15 06:38:30 +00:00
|
|
|
mutex_unlock(&session->eh_mutex);
|
2006-08-31 22:09:33 +00:00
|
|
|
return SUCCESS;
|
|
|
|
}
|
|
|
|
|
2006-04-07 02:13:41 +00:00
|
|
|
/*
|
|
|
|
* If we are not logged in or we have started a new session
|
|
|
|
* then let the host reset code handle this
|
|
|
|
*/
|
2007-12-13 18:43:20 +00:00
|
|
|
if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
|
2022-02-18 19:50:53 +00:00
|
|
|
iscsi_cmd(sc)->age != session->age) {
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2007-12-13 18:43:20 +00:00
|
|
|
mutex_unlock(&session->eh_mutex);
|
2009-06-16 03:11:10 +00:00
|
|
|
ISCSI_DBG_EH(session, "failing abort due to dropped "
|
2009-05-13 22:57:50 +00:00
|
|
|
"session.\n");
|
2007-12-13 18:43:20 +00:00
|
|
|
return FAILED;
|
|
|
|
}
|
|
|
|
|
2021-02-07 04:46:02 +00:00
|
|
|
spin_lock(&session->back_lock);
|
2022-02-18 19:50:53 +00:00
|
|
|
task = iscsi_cmd(sc)->task;
|
2021-02-07 04:46:02 +00:00
|
|
|
if (!task || !task->sc) {
|
|
|
|
/* task completed before time out */
|
2009-06-16 03:11:10 +00:00
|
|
|
ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
|
2021-02-07 04:46:02 +00:00
|
|
|
|
|
|
|
spin_unlock(&session->back_lock);
|
2021-10-04 21:06:08 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
|
|
|
mutex_unlock(&session->eh_mutex);
|
|
|
|
return SUCCESS;
|
2006-07-24 20:47:22 +00:00
|
|
|
}
|
2021-10-04 21:06:08 +00:00
|
|
|
|
2022-06-16 22:45:55 +00:00
|
|
|
if (!iscsi_get_task(task)) {
|
|
|
|
spin_unlock(&session->back_lock);
|
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
|
|
|
mutex_unlock(&session->eh_mutex);
|
|
|
|
/* We are just about to call iscsi_free_task so wait for it. */
|
|
|
|
udelay(ISCSI_CMD_COMPL_WAIT);
|
|
|
|
goto completion_check;
|
|
|
|
}
|
|
|
|
|
|
|
|
ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
|
2021-10-04 21:06:08 +00:00
|
|
|
conn = session->leadconn;
|
|
|
|
iscsi_get_conn(conn->cls_conn);
|
|
|
|
conn->eh_abort_cnt++;
|
|
|
|
age = session->age;
|
scsi: libiscsi: Fix iscsi_task use after free()
The following bug was reported and debugged by wubo40@huawei.com:
When testing kernel 4.18 version, NULL pointer dereference problem occurs
in iscsi_eh_cmd_timed_out() function.
I think this bug in the upstream is still exists.
The analysis reasons are as follows:
1) For some reason, I/O command did not complete within the timeout
period. The block layer timer works, call scsi_times_out() to handle I/O
timeout logic. At the same time the command just completes.
2) scsi_times_out() call iscsi_eh_cmd_timed_out() to process timeout logic.
Although there is an NULL judgment for the task, the task has not been
released yet now.
3) iscsi_complete_task() calls __iscsi_put_task(). The task reference count
reaches zero, the conditions for free task is met, then
iscsi_free_task() frees the task, and sets sc->SCp.ptr = NULL. After
iscsi_eh_cmd_timed_out() passes the task judgment check, there can still
be NULL dereference scenarios.
CPU0 CPU3
|- scsi_times_out() |-
iscsi_complete_task()
| |
|- iscsi_eh_cmd_timed_out() |-
__iscsi_put_task()
| |
|- task=sc->SCp.ptr, task is not NUL, check passed |-
iscsi_free_task(task)
| |
| |-> sc->SCp.ptr
= NULL
| |
|- task is NULL now, NULL pointer dereference |
| |
\|/ \|/
Calltrace:
[380751.840862] BUG: unable to handle kernel NULL pointer dereference at 0000000000000138
[380751.843709] PGD 0 P4D 0
[380751.844770] Oops: 0000 [#1] SMP PTI
[380751.846283] CPU: 0 PID: 403 Comm: kworker/0:1H Kdump: loaded Tainted: G
[380751.851467] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996)
[380751.856521] Workqueue: kblockd blk_mq_timeout_work
[380751.858527] RIP: 0010:iscsi_eh_cmd_timed_out+0x15e/0x2e0 [libiscsi]
[380751.861129] Code: 83 ea 01 48 8d 74 d0 08 48 8b 10 48 8b 4a 50 48 85 c9 74 2c 48 39 d5 74
[380751.868811] RSP: 0018:ffffc1e280a5fd58 EFLAGS: 00010246
[380751.870978] RAX: ffff9fd1e84e15e0 RBX: ffff9fd1e84e6dd0 RCX: 0000000116acc580
[380751.873791] RDX: ffff9fd1f97a9400 RSI: ffff9fd1e84e1800 RDI: ffff9fd1e4d6d420
[380751.876059] RBP: ffff9fd1e4d49000 R08: 0000000116acc580 R09: 0000000116acc580
[380751.878284] R10: 0000000000000000 R11: 0000000000000000 R12: ffff9fd1e6e931e8
[380751.880500] R13: ffff9fd1e84e6ee0 R14: 0000000000000010 R15: 0000000000000003
[380751.882687] FS: 0000000000000000(0000) GS:ffff9fd1fac00000(0000) knlGS:0000000000000000
[380751.885236] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[380751.887059] CR2: 0000000000000138 CR3: 000000011860a001 CR4: 00000000003606f0
[380751.889308] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[380751.891523] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[380751.893738] Call Trace:
[380751.894639] scsi_times_out+0x60/0x1c0
[380751.895861] blk_mq_check_expired+0x144/0x200
[380751.897302] ? __switch_to_asm+0x35/0x70
[380751.898551] blk_mq_queue_tag_busy_iter+0x195/0x2e0
[380751.900091] ? __blk_mq_requeue_request+0x100/0x100
[380751.901611] ? __switch_to_asm+0x41/0x70
[380751.902853] ? __blk_mq_requeue_request+0x100/0x100
[380751.904398] blk_mq_timeout_work+0x54/0x130
[380751.905740] process_one_work+0x195/0x390
[380751.907228] worker_thread+0x30/0x390
[380751.908713] ? process_one_work+0x390/0x390
[380751.910350] kthread+0x10d/0x130
[380751.911470] ? kthread_flush_work_fn+0x10/0x10
[380751.913007] ret_from_fork+0x35/0x40
crash> dis -l iscsi_eh_cmd_timed_out+0x15e
xxxxx/drivers/scsi/libiscsi.c: 2062
1970 enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
{
...
1984 spin_lock_bh(&session->frwd_lock);
1985 task = (struct iscsi_task *)sc->SCp.ptr;
1986 if (!task) {
1987 /*
1988 * Raced with completion. Blk layer has taken ownership
1989 * so let timeout code complete it now.
1990 */
1991 rc = BLK_EH_DONE;
1992 goto done;
1993 }
...
2052 for (i = 0; i < conn->session->cmds_max; i++) {
2053 running_task = conn->session->cmds[i];
2054 if (!running_task->sc || running_task == task ||
2055 running_task->state != ISCSI_TASK_RUNNING)
2056 continue;
2057
2058 /*
2059 * Only check if cmds started before this one have made
2060 * progress, or this could never fail
2061 */
2062 if (time_after(running_task->sc->jiffies_at_alloc,
2063 task->sc->jiffies_at_alloc)) <---
2064 continue;
2065
...
}
crash> struct scsi_cmnd ffff9fd1e6e931e8
struct scsi_cmnd {
...
SCp = {
ptr = 0x0, <--- iscsi_task
this_residual = 0,
...
},
}
To prevent this, we take a ref to the cmd under the back (completion) lock,
so if the completion side were to call iscsi_complete_task() on the task
while the timer/eh paths are not holding the back_lock, it will not be freed
from under us.
Note that this requires the previous patch, "scsi: libiscsi: Drop
taskqueuelock", because bnx2i sleeps in its cleanup_task callout if the cmd
is aborted. If the EH/timer and completion path are racing, we don't know
which path will do the last put. The previous patch moved the operations we
needed to do under the forward lock to cleanup_queued_task. Once that has
run we can drop the forward lock for the cmd, and bnx2i no longer has to
worry about whether the EH, timer or completion path did the last put, or
whether the forward lock is held, since it won't be.
Link: https://lore.kernel.org/r/20210207044608.27585-4-michael.christie@oracle.com
Reported-by: Wu Bo <wubo40@huawei.com>
Reviewed-by: Lee Duncan <lduncan@suse.com>
Signed-off-by: Mike Christie <michael.christie@oracle.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2021-02-07 04:46:02 +00:00
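The pattern the fix introduces, condensed into one place. This is a minimal
illustrative sketch under the assumptions of the message above, not the exact
libiscsi code; it reuses the iscsi_get_task()/iscsi_put_task() helpers seen in
the code below and the sc->SCp.ptr field from the report.

	/* Sketch: timer/EH side of the race described above. */
	struct iscsi_task *task;

	spin_lock(&session->back_lock);
	task = (struct iscsi_task *)sc->SCp.ptr;
	if (!task || !iscsi_get_task(task)) {
		/* Completion already ran, or is freeing the task right now. */
		spin_unlock(&session->back_lock);
		return BLK_EH_DONE;
	}
	spin_unlock(&session->back_lock);

	/*
	 * The task can be inspected here without back_lock held: the extra
	 * reference keeps it from being freed even if the completion path
	 * drops its own reference concurrently.
	 */

	iscsi_put_task(task);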
|
|
|
spin_unlock(&session->back_lock);
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2008-05-21 20:54:09 +00:00
|
|
|
if (task->state == ISCSI_TASK_PENDING) {
|
2009-05-13 22:57:49 +00:00
|
|
|
fail_scsi_task(task, DID_ABORT);
|
2007-05-30 17:57:18 +00:00
|
|
|
goto success;
|
|
|
|
}
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2007-12-13 18:43:20 +00:00
|
|
|
/* only have one tmf outstanding at a time */
|
2021-05-25 18:18:06 +00:00
|
|
|
if (session->tmf_state != TMF_INITIAL)
|
2006-04-07 02:13:41 +00:00
|
|
|
goto failed;
|
2021-05-25 18:18:06 +00:00
|
|
|
session->tmf_state = TMF_QUEUED;
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2021-05-25 18:18:06 +00:00
|
|
|
hdr = &session->tmhdr;
|
2008-05-21 20:54:09 +00:00
|
|
|
iscsi_prep_abort_task_pdu(task, hdr);
|
2007-12-13 18:43:20 +00:00
|
|
|
|
2016-03-30 18:27:08 +00:00
|
|
|
if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout))
|
2007-12-13 18:43:20 +00:00
|
|
|
goto failed;
|
|
|
|
|
2021-05-25 18:18:06 +00:00
|
|
|
switch (session->tmf_state) {
|
2007-12-13 18:43:20 +00:00
|
|
|
case TMF_SUCCESS:
|
[SCSI] libiscsi: Reduce locking contention in fast path
Replace the session lock with two locks, a forward lock and
a backwards lock named frwd_lock and back_lock respectively.
The forward lock protects resources that change while sending a
request to the target, such as cmdsn, queued_cmdsn, and allocating
task from the commands' pool with kfifo_out.
The backward lock protects resources that change while processing
a response or in error path, such as cmdsn_exp, cmdsn_max, and
returning tasks to the commands' pool with kfifo_in.
Under a steady-state fast-path situation, that is when one
or more processes/threads submit IO to an iscsi device and
a single kernel upcall (e.g. softirq) is dealing with processing
of responses without errors, this patch eliminates the contention
between the queuecommand()/request response/scsi_done() flows
associated with iscsi sessions.
Between the forward and the backward locks exists a strict locking
hierarchy. The mutual exclusion zone protected by the forward lock can
enclose the mutual exclusion zone protected by the backward lock but not
vice versa.
For example, in iscsi_conn_teardown or in iscsi_xmit_data when there is
a failure and __iscsi_put_task is called, the backward lock is taken while
the forward lock is still held. On the other hand, if in the RX path a nop
is to be sent, for example in iscsi_handle_reject or __iscsi_complete_pdu,
then the forward lock is released and the backward lock is taken for the
duration of iscsi_send_nopout; later the backward lock is released and the
forward lock is retaken.
libiscsi_tcp uses two kernel fifos, the r2t pool and the r2t queue.
The insertion and deletion from these queues didn't correspond to the
assumptions made by the new forward/backwards session locking paradigm.
That is, in iscsi_tcp_cleanup_task, which belongs to the RX (backwards)
path, an r2t is taken out of the r2t queue and inserted into the r2t pool.
In iscsi_tcp_get_curr_r2t, which belongs to the TX (forward) path, an r2t
is also inserted into the r2t pool and another r2t is pulled from the r2t
queue.
Only in iscsi_tcp_r2t_rsp, which is called in the RX path but can requeue
to the TX path, is an r2t taken from the r2t pool and inserted into the r2t
queue.
In order to cope with this situation, two spin locks were added,
pool2queue and queue2pool. The former protects extracting from the
r2t pool and inserting into the r2t queue, and the latter protects
extracting from the r2t queue and inserting into the r2t pool.
Signed-off-by: Shlomo Pongratz <shlomop@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
[minor fix up to apply cleanly and compile fix]
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
2014-02-07 06:41:38 +00:00
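A compact view of the lock hierarchy described above, using the function
names from this message. This is an illustrative sketch of the allowed
nesting, not a verbatim excerpt of the driver.

	/* Allowed: back_lock nested inside frwd_lock (e.g. an error-path put). */
	spin_lock_bh(&session->frwd_lock);
	/* ... forward-path work: cmdsn, queued_cmdsn, task allocation ... */
	spin_lock(&session->back_lock);
	__iscsi_put_task(task);
	spin_unlock(&session->back_lock);
	spin_unlock_bh(&session->frwd_lock);

	/*
	 * Not allowed: taking frwd_lock while back_lock is held. The RX path
	 * instead drops back_lock, takes frwd_lock around iscsi_send_nopout(),
	 * and then reacquires back_lock.
	 */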
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2008-05-21 20:54:18 +00:00
|
|
|
/*
|
|
|
|
* stop tx side in case the target had sent an abort rsp but
|
|
|
|
* the initiator was still writing out data.
|
|
|
|
*/
|
2007-08-15 06:38:30 +00:00
|
|
|
iscsi_suspend_tx(conn);
|
2007-05-30 17:57:18 +00:00
|
|
|
/*
|
2008-05-21 20:54:18 +00:00
|
|
|
* we do not stop the recv side because targets have been
|
|
|
|
* good and have never sent us a successful tmf response
|
|
|
|
* and then sent more data for the cmd.
|
2007-05-30 17:57:18 +00:00
|
|
|
*/
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2009-05-13 22:57:49 +00:00
|
|
|
fail_scsi_task(task, DID_ABORT);
|
2021-05-25 18:18:06 +00:00
|
|
|
session->tmf_state = TMF_INITIAL;
|
2009-11-11 22:34:32 +00:00
|
|
|
memset(hdr, 0, sizeof(*hdr));
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2007-08-15 06:38:30 +00:00
|
|
|
iscsi_start_tx(conn);
|
2007-05-30 17:57:18 +00:00
|
|
|
goto success_unlocked;
|
2007-12-13 18:43:20 +00:00
|
|
|
case TMF_TIMEDOUT:
|
2021-05-25 18:18:10 +00:00
|
|
|
session->running_aborted_task = task;
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2010-12-31 08:22:18 +00:00
|
|
|
iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
|
2007-12-13 18:43:20 +00:00
|
|
|
goto failed_unlocked;
|
|
|
|
case TMF_NOT_FOUND:
|
2021-05-25 18:18:08 +00:00
|
|
|
if (iscsi_task_is_completed(task)) {
|
2021-05-25 18:18:06 +00:00
|
|
|
session->tmf_state = TMF_INITIAL;
|
2009-11-11 22:34:32 +00:00
|
|
|
memset(hdr, 0, sizeof(*hdr));
|
2008-05-21 20:54:09 +00:00
|
|
|
/* task completed before tmf abort response */
|
2009-06-16 03:11:10 +00:00
|
|
|
ISCSI_DBG_EH(session, "sc completed while abort in "
|
|
|
|
"progress\n");
|
2007-05-30 17:57:18 +00:00
|
|
|
goto success;
|
2006-07-24 20:47:22 +00:00
|
|
|
}
|
2020-08-23 22:36:59 +00:00
|
|
|
fallthrough;
|
2006-07-24 20:47:22 +00:00
|
|
|
default:
|
2021-05-25 18:18:06 +00:00
|
|
|
session->tmf_state = TMF_INITIAL;
|
2007-12-13 18:43:20 +00:00
|
|
|
goto failed;
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
|
|
|
|
2007-05-30 17:57:18 +00:00
|
|
|
success:
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2007-05-30 17:57:18 +00:00
|
|
|
success_unlocked:
|
2009-06-16 03:11:10 +00:00
|
|
|
ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n",
|
|
|
|
sc, task->itt);
|
2021-02-07 04:46:02 +00:00
|
|
|
iscsi_put_task(task);
|
2021-05-25 18:18:04 +00:00
|
|
|
iscsi_put_conn(conn->cls_conn);
|
2007-08-15 06:38:30 +00:00
|
|
|
mutex_unlock(&session->eh_mutex);
|
2006-04-07 02:13:41 +00:00
|
|
|
return SUCCESS;
|
|
|
|
|
|
|
|
failed:
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2007-05-30 17:57:18 +00:00
|
|
|
failed_unlocked:
|
2009-06-16 03:11:10 +00:00
|
|
|
ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc,
|
|
|
|
task ? task->itt : 0);
|
2021-05-25 18:18:10 +00:00
|
|
|
/*
|
|
|
|
* The driver might be accessing the task so hold the ref. The conn
|
|
|
|
* stop cleanup will drop the ref after ep_disconnect so we know the
|
|
|
|
* driver's no longer touching the task.
|
|
|
|
*/
|
|
|
|
if (!session->running_aborted_task)
|
|
|
|
iscsi_put_task(task);
|
|
|
|
|
2021-05-25 18:18:04 +00:00
|
|
|
iscsi_put_conn(conn->cls_conn);
|
2007-08-15 06:38:30 +00:00
|
|
|
mutex_unlock(&session->eh_mutex);
|
2006-04-07 02:13:41 +00:00
|
|
|
return FAILED;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_eh_abort);
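To summarize the abort flow above, a sketch of how tmf_state progresses while
iscsi_eh_abort() drives the single outstanding TMF. The state names come from
the code above; the compressed sequence is illustrative and omits the locking
and reference-count details handled by the full function.

	/*
	 * Sketch of the TMF life cycle in the abort path:
	 *
	 *   TMF_INITIAL -> TMF_QUEUED     EH thread claims the single TMF slot.
	 *   TMF_QUEUED  -> TMF_SUCCESS    Target aborted the task: fail sc with
	 *                                 DID_ABORT and return to TMF_INITIAL.
	 *               -> TMF_NOT_FOUND  Task completed before the abort response:
	 *                                 return to TMF_INITIAL and report success.
	 *               -> TMF_TIMEDOUT   No response: escalate via
	 *                                 iscsi_conn_failure(); connection recovery
	 *                                 cleans up the TMF state.
	 */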
|
|
|
|
|
2007-12-13 18:43:20 +00:00
|
|
|
static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
|
|
|
|
{
|
|
|
|
memset(hdr, 0, sizeof(*hdr));
|
|
|
|
hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
|
|
|
|
hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK;
|
|
|
|
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
|
2011-06-16 22:57:09 +00:00
|
|
|
int_to_scsilun(sc->device->lun, &hdr->lun);
|
2007-12-13 18:43:30 +00:00
|
|
|
hdr->rtt = RESERVED_ITT;
|
2007-12-13 18:43:20 +00:00
|
|
|
}
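For comparison, the abort-task counterpart called from iscsi_eh_abort() above
builds the same kind of TMF PDU. The sketch below is an assumption about its
shape, reconstructed from the LUN-reset prep just shown; it is not a quote of
iscsi_prep_abort_task_pdu() as defined elsewhere in this file, and the field
details may differ.

	/* Hypothetical abort-task TMF prep (illustrative only). */
	static void example_prep_abort_task_pdu(struct iscsi_task *task,
						struct iscsi_tm *hdr)
	{
		memset(hdr, 0, sizeof(*hdr));
		hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
		hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
		hdr->flags |= ISCSI_FLAG_CMD_FINAL;
		int_to_scsilun(task->sc->device->lun, &hdr->lun);
		/* Assumption: the ITT of the task being aborted goes into rtt. */
		hdr->rtt = task->hdr_itt;
	}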
|
|
|
|
|
|
|
|
int iscsi_eh_device_reset(struct scsi_cmnd *sc)
|
|
|
|
{
|
2008-05-21 20:53:59 +00:00
|
|
|
struct iscsi_cls_session *cls_session;
|
|
|
|
struct iscsi_session *session;
|
2007-12-13 18:43:20 +00:00
|
|
|
struct iscsi_conn *conn;
|
|
|
|
struct iscsi_tm *hdr;
|
|
|
|
int rc = FAILED;
|
|
|
|
|
2008-05-21 20:53:59 +00:00
|
|
|
cls_session = starget_to_session(scsi_target(sc->device));
|
|
|
|
session = cls_session->dd_data;
|
|
|
|
|
2014-06-25 13:27:36 +00:00
|
|
|
ISCSI_DBG_EH(session, "LU Reset [sc %p lun %llu]\n", sc,
|
|
|
|
sc->device->lun);
|
2007-12-13 18:43:20 +00:00
|
|
|
|
|
|
|
mutex_lock(&session->eh_mutex);
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2007-12-13 18:43:20 +00:00
|
|
|
/*
|
|
|
|
* Just check if we are not logged in. We cannot check for
|
|
|
|
* the phase because the reset could come from an ioctl.
|
|
|
|
*/
|
|
|
|
if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
|
|
|
|
goto unlock;
|
|
|
|
conn = session->leadconn;
|
|
|
|
|
|
|
|
/* only have one tmf outstanding at a time */
|
2021-05-25 18:18:06 +00:00
|
|
|
if (session->tmf_state != TMF_INITIAL)
|
2007-12-13 18:43:20 +00:00
|
|
|
goto unlock;
|
2021-05-25 18:18:06 +00:00
|
|
|
session->tmf_state = TMF_QUEUED;
|
2007-12-13 18:43:20 +00:00
|
|
|
|
2021-05-25 18:18:06 +00:00
|
|
|
hdr = &session->tmhdr;
|
2007-12-13 18:43:20 +00:00
|
|
|
iscsi_prep_lun_reset_pdu(sc, hdr);
|
|
|
|
|
2008-05-21 20:54:09 +00:00
|
|
|
if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
|
2007-12-13 18:43:30 +00:00
|
|
|
session->lu_reset_timeout)) {
|
2007-12-13 18:43:20 +00:00
|
|
|
rc = FAILED;
|
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
|
2021-05-25 18:18:06 +00:00
|
|
|
switch (session->tmf_state) {
|
2007-12-13 18:43:20 +00:00
|
|
|
case TMF_SUCCESS:
|
|
|
|
break;
|
|
|
|
case TMF_TIMEDOUT:
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2010-12-31 08:22:18 +00:00
|
|
|
iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
|
2007-12-13 18:43:20 +00:00
|
|
|
goto done;
|
|
|
|
default:
|
2021-05-25 18:18:06 +00:00
|
|
|
session->tmf_state = TMF_INITIAL;
|
2007-12-13 18:43:20 +00:00
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
rc = SUCCESS;
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2007-12-13 18:43:20 +00:00
|
|
|
|
|
|
|
iscsi_suspend_tx(conn);
|
2008-05-21 20:54:18 +00:00
|
|
|
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2009-11-11 22:34:32 +00:00
|
|
|
memset(hdr, 0, sizeof(*hdr));
|
2009-05-13 22:57:46 +00:00
|
|
|
fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
|
2021-05-25 18:18:06 +00:00
|
|
|
session->tmf_state = TMF_INITIAL;
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2007-12-13 18:43:20 +00:00
|
|
|
|
|
|
|
iscsi_start_tx(conn);
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
unlock:
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2007-12-13 18:43:20 +00:00
|
|
|
done:
|
2009-06-16 03:11:10 +00:00
|
|
|
ISCSI_DBG_EH(session, "dev reset result = %s\n",
|
|
|
|
rc == SUCCESS ? "SUCCESS" : "FAILED");
|
2007-12-13 18:43:20 +00:00
|
|
|
mutex_unlock(&session->eh_mutex);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_eh_device_reset);
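For context, a libiscsi-based low-level driver hands these exported handlers
to the SCSI midlayer through its scsi_host_template. A sketch of typical
wiring follows; the template name is made up, the exact callback set varies
per driver, and the target-reset entry is an assumption rather than something
taken from this file.

	/* Hypothetical LLD host template wiring (illustrative only). */
	static struct scsi_host_template example_iscsi_sht = {
		.name			 = "example-iscsi",
		.eh_timed_out		 = iscsi_eh_cmd_timed_out,
		.eh_abort_handler	 = iscsi_eh_abort,
		.eh_device_reset_handler = iscsi_eh_device_reset,
		.eh_target_reset_handler = iscsi_eh_recover_target, /* assumed */
	};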
|
|
|
|
|
2009-11-11 22:34:33 +00:00
|
|
|
void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
|
|
|
|
{
|
|
|
|
struct iscsi_session *session = cls_session->dd_data;
|
|
|
|
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2009-11-11 22:34:33 +00:00
|
|
|
if (session->state != ISCSI_STATE_LOGGED_IN) {
|
|
|
|
session->state = ISCSI_STATE_RECOVERY_FAILED;
|
2021-05-25 18:18:06 +00:00
|
|
|
wake_up(&session->ehwait);
|
2009-11-11 22:34:33 +00:00
|
|
|
}
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2009-11-11 22:34:33 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
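The wake_up(&session->ehwait) above pairs with a wait on the error-handler
side. A sketch of that waiting side, assuming the usual wait_event pattern;
the condition shown lists the states involved for illustration and is not a
quote of iscsi_eh_session_reset() below.

	/* Sketch: EH thread blocks until recovery resolves one way or the other. */
	wait_event_interruptible(session->ehwait,
				 session->state == ISCSI_STATE_TERMINATE ||
				 session->state == ISCSI_STATE_LOGGED_IN ||
				 session->state == ISCSI_STATE_RECOVERY_FAILED);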
|
|
|
|
|
|
|
|
/**
|
|
|
|
* iscsi_eh_session_reset - drop session and attempt relogin
|
|
|
|
* @sc: scsi command
|
|
|
|
*
|
|
|
|
* This function will wait for a relogin, session termination from
|
|
|
|
* userspace, or a recovery/replacement timeout.
|
|
|
|
*/
|
2010-02-20 02:32:10 +00:00
|
|
|
int iscsi_eh_session_reset(struct scsi_cmnd *sc)
|
2009-11-11 22:34:33 +00:00
|
|
|
{
|
|
|
|
struct iscsi_cls_session *cls_session;
|
|
|
|
struct iscsi_session *session;
|
|
|
|
struct iscsi_conn *conn;
|
|
|
|
|
|
|
|
cls_session = starget_to_session(scsi_target(sc->device));
|
|
|
|
session = cls_session->dd_data;
|
|
|
|
|
|
|
|
mutex_lock(&session->eh_mutex);
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2009-11-11 22:34:33 +00:00
|
|
|
if (session->state == ISCSI_STATE_TERMINATE) {
|
|
|
|
failed:
|
|
|
|
ISCSI_DBG_EH(session,
|
|
|
|
"failing session reset: Could not log back into "
|
2018-11-21 01:22:45 +00:00
|
|
|
"%s [age %d]\n", session->targetname,
|
|
|
|
session->age);
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2009-11-11 22:34:33 +00:00
|
|
|
mutex_unlock(&session->eh_mutex);
|
|
|
|
return FAILED;
|
|
|
|
}
|
|
|
|
|
2021-05-25 18:18:05 +00:00
|
|
|
conn = session->leadconn;
|
|
|
|
iscsi_get_conn(conn->cls_conn);
|
|
|
|
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2009-11-11 22:34:33 +00:00
|
|
|
mutex_unlock(&session->eh_mutex);
|
2021-05-25 18:18:05 +00:00
|
|
|
|
2010-12-31 08:22:18 +00:00
|
|
|
iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
|
2021-05-25 18:18:05 +00:00
|
|
|
iscsi_put_conn(conn->cls_conn);
|
2009-11-11 22:34:33 +00:00
|
|
|
|
|
|
|
ISCSI_DBG_EH(session, "wait for relogin\n");
|
2021-05-25 18:18:06 +00:00
|
|
|
wait_event_interruptible(session->ehwait,
|
2009-11-11 22:34:33 +00:00
|
|
|
session->state == ISCSI_STATE_TERMINATE ||
|
|
|
|
session->state == ISCSI_STATE_LOGGED_IN ||
|
|
|
|
session->state == ISCSI_STATE_RECOVERY_FAILED);
|
|
|
|
if (signal_pending(current))
|
|
|
|
flush_signals(current);
|
|
|
|
|
|
|
|
mutex_lock(&session->eh_mutex);
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2009-11-11 22:34:33 +00:00
|
|
|
if (session->state == ISCSI_STATE_LOGGED_IN) {
|
|
|
|
ISCSI_DBG_EH(session,
|
|
|
|
"session reset succeeded for %s,%s\n",
|
|
|
|
session->targetname, conn->persistent_address);
|
|
|
|
} else
|
|
|
|
goto failed;
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2009-11-11 22:34:33 +00:00
|
|
|
mutex_unlock(&session->eh_mutex);
|
|
|
|
return SUCCESS;
|
|
|
|
}
|
2010-02-20 02:32:10 +00:00
|
|
|
EXPORT_SYMBOL_GPL(iscsi_eh_session_reset);
|
2009-11-11 22:34:33 +00:00
|
|
|
|
|
|
|
static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
|
|
|
|
{
|
|
|
|
memset(hdr, 0, sizeof(*hdr));
|
|
|
|
hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
|
|
|
|
hdr->flags = ISCSI_TM_FUNC_TARGET_WARM_RESET & ISCSI_FLAG_TM_FUNC_MASK;
|
|
|
|
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
|
|
|
|
hdr->rtt = RESERVED_ITT;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* iscsi_eh_target_reset - reset target
|
|
|
|
* @sc: scsi command
|
|
|
|
*
|
2010-02-20 02:32:10 +00:00
|
|
|
* This will attempt to send a warm target reset.
|
2009-11-11 22:34:33 +00:00
|
|
|
*/
|
2016-03-30 18:26:46 +00:00
|
|
|
static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
|
2009-11-11 22:34:33 +00:00
|
|
|
{
|
|
|
|
struct iscsi_cls_session *cls_session;
|
|
|
|
struct iscsi_session *session;
|
|
|
|
struct iscsi_conn *conn;
|
|
|
|
struct iscsi_tm *hdr;
|
|
|
|
int rc = FAILED;
|
|
|
|
|
|
|
|
cls_session = starget_to_session(scsi_target(sc->device));
|
|
|
|
session = cls_session->dd_data;
|
|
|
|
|
|
|
|
ISCSI_DBG_EH(session, "tgt Reset [sc %p tgt %s]\n", sc,
|
|
|
|
session->targetname);
|
|
|
|
|
|
|
|
mutex_lock(&session->eh_mutex);
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2009-11-11 22:34:33 +00:00
|
|
|
/*
|
|
|
|
* Just check if we are not logged in. We cannot check for
|
|
|
|
* the phase because the reset could come from an ioctl.
|
|
|
|
*/
|
|
|
|
if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
|
|
|
|
goto unlock;
|
|
|
|
conn = session->leadconn;
|
|
|
|
|
|
|
|
/* only have one tmf outstanding at a time */
|
2021-05-25 18:18:06 +00:00
|
|
|
if (session->tmf_state != TMF_INITIAL)
|
2009-11-11 22:34:33 +00:00
|
|
|
goto unlock;
|
2021-05-25 18:18:06 +00:00
|
|
|
session->tmf_state = TMF_QUEUED;
|
2009-11-11 22:34:33 +00:00
|
|
|
|
2021-05-25 18:18:06 +00:00
|
|
|
hdr = &session->tmhdr;
|
2009-11-11 22:34:33 +00:00
|
|
|
iscsi_prep_tgt_reset_pdu(sc, hdr);
|
|
|
|
|
|
|
|
if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
|
|
|
|
session->tgt_reset_timeout)) {
|
|
|
|
rc = FAILED;
|
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
|
2021-05-25 18:18:06 +00:00
|
|
|
switch (session->tmf_state) {
|
2009-11-11 22:34:33 +00:00
|
|
|
case TMF_SUCCESS:
|
|
|
|
break;
|
|
|
|
case TMF_TIMEDOUT:
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2010-12-31 08:22:18 +00:00
|
|
|
iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
|
2009-11-11 22:34:33 +00:00
|
|
|
goto done;
|
|
|
|
default:
|
2021-05-25 18:18:06 +00:00
|
|
|
session->tmf_state = TMF_INITIAL;
|
2009-11-11 22:34:33 +00:00
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
rc = SUCCESS;
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2009-11-11 22:34:33 +00:00
|
|
|
|
|
|
|
iscsi_suspend_tx(conn);
|
|
|
|
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2009-11-11 22:34:33 +00:00
|
|
|
memset(hdr, 0, sizeof(*hdr));
|
|
|
|
fail_scsi_tasks(conn, -1, DID_ERROR);
|
2021-05-25 18:18:06 +00:00
|
|
|
session->tmf_state = TMF_INITIAL;
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2009-11-11 22:34:33 +00:00
|
|
|
|
|
|
|
iscsi_start_tx(conn);
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
unlock:
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2009-11-11 22:34:33 +00:00
|
|
|
done:
|
|
|
|
ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname,
|
|
|
|
rc == SUCCESS ? "SUCCESS" : "FAILED");
|
|
|
|
mutex_unlock(&session->eh_mutex);
|
2010-02-20 02:32:10 +00:00
|
|
|
return rc;
|
|
|
|
}
|
2009-11-11 22:34:33 +00:00
|
|
|
|
2010-02-20 02:32:10 +00:00
|
|
|
/**
|
|
|
|
* iscsi_eh_recover_target - reset target and possibly the session
|
|
|
|
* @sc: scsi command
|
|
|
|
*
|
|
|
|
* This will attempt to send a warm target reset. If that fails,
|
|
|
|
* we will escalate to ERL0 session recovery.
|
|
|
|
*/
|
|
|
|
int iscsi_eh_recover_target(struct scsi_cmnd *sc)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
rc = iscsi_eh_target_reset(sc);
|
2009-11-11 22:34:33 +00:00
|
|
|
if (rc == FAILED)
|
|
|
|
rc = iscsi_eh_session_reset(sc);
|
|
|
|
return rc;
|
|
|
|
}
|
2010-02-20 02:32:10 +00:00
|
|
|
EXPORT_SYMBOL_GPL(iscsi_eh_recover_target);
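For context, a hedged sketch of how a software iSCSI LLD typically wires these
exported error handlers into its SCSI host template (modeled on drivers such as
iscsi_tcp; the example_* name and the omitted fields are placeholders, not a
complete template):
#include <linux/module.h>
#include <scsi/scsi_host.h>
#include <scsi/libiscsi.h>

static const struct scsi_host_template example_iscsi_sht = {
	.module			 = THIS_MODULE,
	.name			 = "example-iscsi",
	.queuecommand		 = iscsi_queuecommand,
	.eh_abort_handler	 = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	/* warm target reset first, escalating to session-level recovery */
	.eh_target_reset_handler = iscsi_eh_recover_target,
	/* .can_queue, .sg_tablesize, etc. omitted for brevity */
};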
|
2009-11-11 22:34:33 +00:00
|
|
|
|
2007-12-13 18:43:25 +00:00
|
|
|
/*
|
|
|
|
* Pre-allocate a pool of @max items of @item_size. By default, the pool
|
|
|
|
* should be accessed via kfifo_out/kfifo_in on q->queue.
|
|
|
|
* Optionally, the caller can obtain the array of object pointers
|
|
|
|
* by passing in a non-NULL @items pointer.
|
|
|
|
*/
|
2006-04-07 02:13:41 +00:00
|
|
|
int
|
2007-12-13 18:43:25 +00:00
|
|
|
iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
|
2006-04-07 02:13:41 +00:00
|
|
|
{
|
2007-12-13 18:43:25 +00:00
|
|
|
int i, num_arrays = 1;
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2007-12-13 18:43:25 +00:00
|
|
|
memset(q, 0, sizeof(*q));
|
2006-04-07 02:13:41 +00:00
|
|
|
|
|
|
|
q->max = max;
|
2007-12-13 18:43:25 +00:00
|
|
|
|
|
|
|
/* If the user passed an items pointer, he wants a copy of
|
|
|
|
* the array. */
|
|
|
|
if (items)
|
|
|
|
num_arrays++;
|
treewide: kvzalloc() -> kvcalloc()
The kvzalloc() function has a 2-factor argument form, kvcalloc(). This
patch replaces cases of:
kvzalloc(a * b, gfp)
with:
kvcalloc(a * b, gfp)
as well as handling cases of:
kvzalloc(a * b * c, gfp)
with:
kvzalloc(array3_size(a, b, c), gfp)
as it's slightly less ugly than:
kvcalloc(array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
kvzalloc(4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kvzalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kvzalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kvzalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kvzalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kvzalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kvzalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kvzalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kvzalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kvzalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kvzalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kvzalloc
+ kvcalloc
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kvzalloc
+ kvcalloc
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kvzalloc
+ kvcalloc
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kvzalloc
+ kvcalloc
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kvzalloc
+ kvcalloc
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kvzalloc
+ kvcalloc
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kvzalloc
+ kvcalloc
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kvzalloc
+ kvcalloc
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kvzalloc
+ kvcalloc
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kvzalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kvzalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kvzalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kvzalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kvzalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kvzalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kvzalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kvzalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kvzalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kvzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kvzalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kvzalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kvzalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kvzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kvzalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kvzalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kvzalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kvzalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kvzalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kvzalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kvzalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kvzalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kvzalloc(C1 * C2 * C3, ...)
|
kvzalloc(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kvzalloc(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kvzalloc(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kvzalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kvzalloc(sizeof(THING) * C2, ...)
|
kvzalloc(sizeof(TYPE) * C2, ...)
|
kvzalloc(C1 * C2 * C3, ...)
|
kvzalloc(C1 * C2, ...)
|
- kvzalloc
+ kvcalloc
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kvzalloc
+ kvcalloc
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kvzalloc
+ kvcalloc
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kvzalloc
+ kvcalloc
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kvzalloc
+ kvcalloc
(
- (E1) * E2
+ E1, E2
, ...)
|
- kvzalloc
+ kvcalloc
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kvzalloc
+ kvcalloc
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-12 21:04:48 +00:00
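As a concrete illustration of the conversion described above (a hypothetical
pointer array, not a line from this file):
/*
 *	before:	ptrs = kvzalloc(nr * sizeof(void *), GFP_KERNEL);
 *	after:	ptrs = kvcalloc(nr, sizeof(void *), GFP_KERNEL);
 *
 * kvcalloc() takes the count and element size separately, so the
 * multiplication is overflow-checked instead of open-coded.
 */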
|
|
|
q->pool = kvcalloc(num_arrays * max, sizeof(void *), GFP_KERNEL);
|
2007-12-13 18:43:25 +00:00
|
|
|
if (q->pool == NULL)
|
2009-03-05 20:45:55 +00:00
|
|
|
return -ENOMEM;
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2009-12-21 22:37:27 +00:00
|
|
|
kfifo_init(&q->queue, (void*)q->pool, max * sizeof(void*));
|
2006-04-07 02:13:41 +00:00
|
|
|
|
|
|
|
for (i = 0; i < max; i++) {
|
2007-12-13 18:43:25 +00:00
|
|
|
q->pool[i] = kzalloc(item_size, GFP_KERNEL);
|
2006-04-07 02:13:41 +00:00
|
|
|
if (q->pool[i] == NULL) {
|
2007-12-13 18:43:25 +00:00
|
|
|
q->max = i;
|
|
|
|
goto enomem;
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
2009-12-21 22:37:28 +00:00
|
|
|
kfifo_in(&q->queue, (void*)&q->pool[i], sizeof(void*));
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
2007-12-13 18:43:25 +00:00
|
|
|
|
|
|
|
if (items) {
|
|
|
|
*items = q->pool + max;
|
|
|
|
memcpy(*items, q->pool, max * sizeof(void *));
|
|
|
|
}
|
|
|
|
|
2006-04-07 02:13:41 +00:00
|
|
|
return 0;
|
2007-12-13 18:43:25 +00:00
|
|
|
|
|
|
|
enomem:
|
|
|
|
iscsi_pool_free(q);
|
|
|
|
return -ENOMEM;
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_pool_init);
|
|
|
|
|
2007-12-13 18:43:25 +00:00
|
|
|
void iscsi_pool_free(struct iscsi_pool *q)
|
2006-04-07 02:13:41 +00:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < q->max; i++)
|
2007-12-13 18:43:25 +00:00
|
|
|
kfree(q->pool[i]);
|
2017-05-17 20:21:54 +00:00
|
|
|
kvfree(q->pool);
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_pool_free);
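A hedged usage sketch (hypothetical caller, not from the kernel tree): allocate
a small pool, pull an object out through the pool's kfifo and return it, in the
style libiscsi_tcp uses for its r2t pool:
#include <linux/errno.h>
#include <linux/kfifo.h>
#include <scsi/libiscsi.h>

struct example_obj {
	int tag;			/* hypothetical per-object state */
};

static int example_pool_user(void)
{
	struct iscsi_pool pool;
	struct example_obj *obj;

	/* 16 pre-allocated objects; no copy of the pointer array needed */
	if (iscsi_pool_init(&pool, 16, NULL, sizeof(struct example_obj)))
		return -ENOMEM;

	/* take one object out of the pool */
	if (kfifo_out(&pool.queue, (void *)&obj, sizeof(void *)) != sizeof(void *)) {
		iscsi_pool_free(&pool);
		return -ENOMEM;
	}
	obj->tag = 42;

	/* ... use the object, then return it to the pool ... */
	kfifo_in(&pool.queue, (void *)&obj, sizeof(void *));

	iscsi_pool_free(&pool);
	return 0;
}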
|
|
|
|
|
2021-02-07 04:46:04 +00:00
|
|
|
int iscsi_host_get_max_scsi_cmds(struct Scsi_Host *shost,
|
|
|
|
uint16_t requested_cmds_max)
|
|
|
|
{
|
|
|
|
int scsi_cmds, total_cmds = requested_cmds_max;
|
|
|
|
|
|
|
|
check:
|
|
|
|
if (!total_cmds)
|
|
|
|
total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
|
|
|
|
/*
|
|
|
|
* The iscsi layer needs some tasks for nop handling and tmfs,
|
|
|
|
* so cmds_max must be at least ISCSI_MGMT_CMDS_MAX + 1, i.e. the
|
|
|
|
* management slots plus one command for SCSI I/O.
|
|
|
|
*/
|
|
|
|
if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
|
|
|
|
printk(KERN_ERR "iscsi: invalid max cmds of %d. Must be a power of two that is at least %d.\n",
|
|
|
|
total_cmds, ISCSI_TOTAL_CMDS_MIN);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
|
|
|
|
printk(KERN_INFO "iscsi: invalid max cmds of %d. Must be a power of 2 less than or equal to %d. Using %d.\n",
|
|
|
|
requested_cmds_max, ISCSI_TOTAL_CMDS_MAX,
|
|
|
|
ISCSI_TOTAL_CMDS_MAX);
|
|
|
|
total_cmds = ISCSI_TOTAL_CMDS_MAX;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!is_power_of_2(total_cmds)) {
|
|
|
|
total_cmds = rounddown_pow_of_two(total_cmds);
|
|
|
|
if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
|
|
|
|
printk(KERN_ERR "iscsi: invalid max cmds of %d. Must be a power of 2 greater than %d.\n", requested_cmds_max, ISCSI_TOTAL_CMDS_MIN);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
printk(KERN_INFO "iscsi: invalid max cmds %d. Must be a power of 2. Rounding max cmds down to %d.\n",
|
|
|
|
requested_cmds_max, total_cmds);
|
|
|
|
}
|
|
|
|
|
|
|
|
scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
|
|
|
|
if (shost->can_queue && scsi_cmds > shost->can_queue) {
|
|
|
|
total_cmds = shost->can_queue;
|
|
|
|
|
|
|
|
printk(KERN_INFO "iscsi: requested max cmds %u is higher than driver limit. Using driver limit %u\n",
|
|
|
|
requested_cmds_max, shost->can_queue);
|
|
|
|
goto check;
|
|
|
|
}
|
|
|
|
|
|
|
|
return scsi_cmds;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_host_get_max_scsi_cmds);
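A short worked example of the clamping above (hypothetical numbers, constants
referenced by name only):
/*
 * requested_cmds_max = 100: within range but not a power of two, so it
 * is rounded down to 64; the function returns 64 - ISCSI_MGMT_CMDS_MAX
 * slots for SCSI I/O and keeps ISCSI_MGMT_CMDS_MAX slots for nops/TMFs.
 * requested_cmds_max = 0: falls back to ISCSI_DEF_XMIT_CMDS_MAX before
 * the same checks run. If the resulting SCSI command count exceeds a
 * non-zero shost->can_queue, total_cmds is clamped to the driver limit
 * and rechecked.
 */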
|
|
|
|
|
2008-05-21 20:54:00 +00:00
|
|
|
/**
|
|
|
|
* iscsi_host_add - add host to system
|
|
|
|
* @shost: scsi host
|
|
|
|
* @pdev: parent device
|
|
|
|
*
|
|
|
|
* This should be called by partial offload and software iscsi drivers
|
|
|
|
* to add a host to the system.
|
|
|
|
*/
|
|
|
|
int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
|
|
|
|
{
|
2008-06-16 15:11:33 +00:00
|
|
|
if (!shost->can_queue)
|
|
|
|
shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
|
|
|
|
|
2009-03-05 20:46:04 +00:00
|
|
|
if (!shost->cmd_per_lun)
|
|
|
|
shost->cmd_per_lun = ISCSI_DEF_CMD_PER_LUN;
|
|
|
|
|
2008-05-21 20:54:00 +00:00
|
|
|
return scsi_add_host(shost, pdev);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_host_add);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* iscsi_host_alloc - allocate a host and driver data
|
|
|
|
* @sht: scsi host template
|
|
|
|
* @dd_data_size: driver host data size
|
2009-03-05 20:46:03 +00:00
|
|
|
* @xmit_can_sleep: bool indicating if LLD will queue IO from a work queue
|
2008-05-21 20:54:00 +00:00
|
|
|
*
|
|
|
|
* This should be called by partial offload and software iscsi drivers.
|
|
|
|
* To access the driver specific memory use the iscsi_host_priv() macro.
|
|
|
|
*/
|
2023-03-22 19:54:44 +00:00
|
|
|
struct Scsi_Host *iscsi_host_alloc(const struct scsi_host_template *sht,
|
2009-03-05 20:46:04 +00:00
|
|
|
int dd_data_size, bool xmit_can_sleep)
|
2008-05-21 20:53:59 +00:00
|
|
|
{
|
2008-05-21 20:54:00 +00:00
|
|
|
struct Scsi_Host *shost;
|
2008-09-24 16:46:10 +00:00
|
|
|
struct iscsi_host *ihost;
|
2008-05-21 20:54:00 +00:00
|
|
|
|
|
|
|
shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
|
|
|
|
if (!shost)
|
|
|
|
return NULL;
|
2008-09-24 16:46:10 +00:00
|
|
|
ihost = shost_priv(shost);
|
2009-03-05 20:46:03 +00:00
|
|
|
|
|
|
|
if (xmit_can_sleep) {
|
2022-02-26 23:04:35 +00:00
|
|
|
ihost->workq = alloc_workqueue("iscsi_q_%d",
|
2020-05-05 01:19:08 +00:00
|
|
|
WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
|
2022-02-26 23:04:35 +00:00
|
|
|
1, shost->host_no);
|
2009-03-05 20:46:03 +00:00
|
|
|
if (!ihost->workq)
|
|
|
|
goto free_host;
|
|
|
|
}
|
|
|
|
|
2008-09-24 16:46:10 +00:00
|
|
|
spin_lock_init(&ihost->lock);
|
|
|
|
ihost->state = ISCSI_HOST_SETUP;
|
|
|
|
ihost->num_sessions = 0;
|
|
|
|
init_waitqueue_head(&ihost->session_removal_wq);
|
2008-05-21 20:54:00 +00:00
|
|
|
return shost;
|
2009-03-05 20:46:03 +00:00
|
|
|
|
|
|
|
free_host:
|
|
|
|
scsi_host_put(shost);
|
|
|
|
return NULL;
|
2008-05-21 20:54:00 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_host_alloc);
|
|
|
|
|
2008-09-24 16:46:10 +00:00
|
|
|
static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session)
|
|
|
|
{
|
2009-03-05 20:46:05 +00:00
|
|
|
iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_INVALID_HOST);
|
2008-09-24 16:46:10 +00:00
|
|
|
}
|
|
|
|
|
2008-05-21 20:54:00 +00:00
|
|
|
/**
|
|
|
|
* iscsi_host_remove - remove host and sessions
|
|
|
|
* @shost: scsi host
|
2022-06-16 22:27:38 +00:00
|
|
|
* @is_shutdown: true if called from a driver shutdown callout
|
2008-05-21 20:54:00 +00:00
|
|
|
*
|
2008-09-24 16:46:10 +00:00
|
|
|
* If there are any sessions left, this will initiate the removal and wait
|
|
|
|
* for the completion.
|
2008-05-21 20:54:00 +00:00
|
|
|
*/
|
2022-06-16 22:27:38 +00:00
|
|
|
void iscsi_host_remove(struct Scsi_Host *shost, bool is_shutdown)
|
2008-05-21 20:54:00 +00:00
|
|
|
{
|
2008-09-24 16:46:10 +00:00
|
|
|
struct iscsi_host *ihost = shost_priv(shost);
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&ihost->lock, flags);
|
|
|
|
ihost->state = ISCSI_HOST_REMOVED;
|
|
|
|
spin_unlock_irqrestore(&ihost->lock, flags);
|
|
|
|
|
2022-06-16 22:27:38 +00:00
|
|
|
if (!is_shutdown)
|
|
|
|
iscsi_host_for_each_session(shost, iscsi_notify_host_removed);
|
|
|
|
else
|
|
|
|
iscsi_host_for_each_session(shost, iscsi_force_destroy_session);
|
|
|
|
|
2008-09-24 16:46:10 +00:00
|
|
|
wait_event_interruptible(ihost->session_removal_wq,
|
|
|
|
ihost->num_sessions == 0);
|
|
|
|
if (signal_pending(current))
|
|
|
|
flush_signals(current);
|
|
|
|
|
2008-05-21 20:54:00 +00:00
|
|
|
scsi_remove_host(shost);
|
2008-05-21 20:53:59 +00:00
|
|
|
}
|
2008-05-21 20:54:00 +00:00
|
|
|
EXPORT_SYMBOL_GPL(iscsi_host_remove);
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2008-05-21 20:54:00 +00:00
|
|
|
void iscsi_host_free(struct Scsi_Host *shost)
|
2008-05-21 20:53:59 +00:00
|
|
|
{
|
|
|
|
struct iscsi_host *ihost = shost_priv(shost);
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2021-02-07 04:46:03 +00:00
|
|
|
if (ihost->workq)
|
|
|
|
destroy_workqueue(ihost->workq);
|
|
|
|
|
2008-05-21 20:53:59 +00:00
|
|
|
kfree(ihost->netdev);
|
|
|
|
kfree(ihost->hwaddress);
|
|
|
|
kfree(ihost->initiatorname);
|
2008-05-21 20:54:00 +00:00
|
|
|
scsi_host_put(shost);
|
2008-05-21 20:53:59 +00:00
|
|
|
}
|
2008-05-21 20:54:00 +00:00
|
|
|
EXPORT_SYMBOL_GPL(iscsi_host_free);
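A hedged sketch of the host lifecycle a software iSCSI LLD drives with the
helpers above (the example_* names, the host template and the private-data
struct are placeholders, not real driver code):
#include <linux/errno.h>
#include <scsi/scsi_host.h>
#include <scsi/libiscsi.h>

struct example_host_data {
	void *hw;			/* hypothetical per-host driver state */
};

/* e.g. the template sketched after iscsi_eh_recover_target above */
extern const struct scsi_host_template example_iscsi_sht;

static int example_host_setup(struct device *pdev)
{
	struct Scsi_Host *shost;
	int err;

	/* allocate host + LLD private area; xmit runs from a workqueue */
	shost = iscsi_host_alloc(&example_iscsi_sht,
				 sizeof(struct example_host_data), true);
	if (!shost)
		return -ENOMEM;

	err = iscsi_host_add(shost, pdev);
	if (err) {
		iscsi_host_free(shost);
		return err;
	}
	return 0;
}

static void example_host_teardown(struct Scsi_Host *shost)
{
	iscsi_host_remove(shost, false);	/* fail and wait out remaining sessions */
	iscsi_host_free(shost);
}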
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2008-09-24 16:46:10 +00:00
|
|
|
static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost)
|
|
|
|
{
|
|
|
|
struct iscsi_host *ihost = shost_priv(shost);
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
shost = scsi_host_get(shost);
|
|
|
|
if (!shost) {
|
|
|
|
printk(KERN_ERR "Invalid state. Cannot notify host removal "
|
|
|
|
"of session teardown event because host already "
|
|
|
|
"removed.\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_lock_irqsave(&ihost->lock, flags);
|
|
|
|
ihost->num_sessions--;
|
|
|
|
if (ihost->num_sessions == 0)
|
|
|
|
wake_up(&ihost->session_removal_wq);
|
|
|
|
spin_unlock_irqrestore(&ihost->lock, flags);
|
|
|
|
scsi_host_put(shost);
|
|
|
|
}
|
|
|
|
|
2006-04-07 02:13:41 +00:00
|
|
|
/**
|
|
|
|
* iscsi_session_setup - create iscsi cls session and iscsi session
|
|
|
|
* @iscsit: iscsi transport template
|
2008-05-21 20:53:59 +00:00
|
|
|
* @shost: scsi host
|
|
|
|
* @cmds_max: number of commands the session can queue
|
2017-12-22 22:08:27 +00:00
|
|
|
* @dd_size: private driver data size, added to session allocation size
|
2008-05-21 20:54:09 +00:00
|
|
|
* @cmd_task_size: LLD task private data size
|
2006-04-07 02:13:41 +00:00
|
|
|
* @initial_cmdsn: initial CmdSN
|
2017-12-22 22:08:27 +00:00
|
|
|
* @id: target ID to add to this session
|
2006-04-07 02:13:41 +00:00
|
|
|
*
|
|
|
|
* This can be used by software iscsi_transports that allocate
|
|
|
|
* a session per scsi host.
|
2008-05-21 20:54:17 +00:00
|
|
|
*
|
|
|
|
* Callers should set cmds_max to the largest total number (mgmt + scsi) of
|
|
|
|
* tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
|
|
|
|
* for nop handling and login/logout requests.
|
2008-05-21 20:53:59 +00:00
|
|
|
*/
|
2006-04-07 02:13:41 +00:00
|
|
|
struct iscsi_cls_session *
|
2008-05-21 20:53:59 +00:00
|
|
|
iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
|
2009-09-22 02:51:22 +00:00
|
|
|
uint16_t cmds_max, int dd_size, int cmd_task_size,
|
2008-05-21 20:54:12 +00:00
|
|
|
uint32_t initial_cmdsn, unsigned int id)
|
2006-04-07 02:13:41 +00:00
|
|
|
{
|
2008-09-24 16:46:10 +00:00
|
|
|
struct iscsi_host *ihost = shost_priv(shost);
|
2006-04-07 02:13:41 +00:00
|
|
|
struct iscsi_session *session;
|
|
|
|
struct iscsi_cls_session *cls_session;
|
2021-02-07 04:46:04 +00:00
|
|
|
int cmd_i, scsi_cmds;
|
2008-09-24 16:46:10 +00:00
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&ihost->lock, flags);
|
|
|
|
if (ihost->state == ISCSI_HOST_REMOVED) {
|
|
|
|
spin_unlock_irqrestore(&ihost->lock, flags);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
ihost->num_sessions++;
|
|
|
|
spin_unlock_irqrestore(&ihost->lock, flags);
|
2008-06-16 15:11:33 +00:00
|
|
|
|
2021-02-07 04:46:04 +00:00
|
|
|
scsi_cmds = iscsi_host_get_max_scsi_cmds(shost, cmds_max);
|
|
|
|
if (scsi_cmds < 0)
|
2008-09-24 16:46:10 +00:00
|
|
|
goto dec_session_count;
|
2007-05-30 17:57:19 +00:00
|
|
|
|
2008-05-21 20:54:01 +00:00
|
|
|
cls_session = iscsi_alloc_session(shost, iscsit,
|
2009-09-22 02:51:22 +00:00
|
|
|
sizeof(struct iscsi_session) +
|
|
|
|
dd_size);
|
2008-05-21 20:53:59 +00:00
|
|
|
if (!cls_session)
|
2008-09-24 16:46:10 +00:00
|
|
|
goto dec_session_count;
|
2008-05-21 20:53:59 +00:00
|
|
|
session = cls_session->dd_data;
|
|
|
|
session->cls_session = cls_session;
|
2006-04-07 02:13:41 +00:00
|
|
|
session->host = shost;
|
|
|
|
session->state = ISCSI_STATE_FREE;
|
2007-12-13 18:43:30 +00:00
|
|
|
session->fast_abort = 1;
|
2009-11-11 22:34:33 +00:00
|
|
|
session->tgt_reset_timeout = 30;
|
2007-12-13 18:43:38 +00:00
|
|
|
session->lu_reset_timeout = 15;
|
|
|
|
session->abort_timeout = 10;
|
2008-05-21 20:54:17 +00:00
|
|
|
session->scsi_cmds_max = scsi_cmds;
|
2021-02-07 04:46:04 +00:00
|
|
|
session->cmds_max = scsi_cmds + ISCSI_MGMT_CMDS_MAX;
|
2007-07-26 17:46:48 +00:00
|
|
|
session->queued_cmdsn = session->cmdsn = initial_cmdsn;
|
2006-04-07 02:13:41 +00:00
|
|
|
session->exp_cmdsn = initial_cmdsn + 1;
|
|
|
|
session->max_cmdsn = initial_cmdsn + 1;
|
|
|
|
session->max_r2t = 1;
|
|
|
|
session->tt = iscsit;
|
2009-09-22 02:51:22 +00:00
|
|
|
session->dd_data = cls_session->dd_data + sizeof(*session);
|
2014-02-07 06:41:38 +00:00
|
|
|
|
2021-05-25 18:18:06 +00:00
|
|
|
session->tmf_state = TMF_INITIAL;
|
|
|
|
timer_setup(&session->tmf_timer, iscsi_tmf_timedout, 0);
|
2007-08-15 06:38:30 +00:00
|
|
|
mutex_init(&session->eh_mutex);
|
2021-09-11 13:51:59 +00:00
|
|
|
init_waitqueue_head(&session->ehwait);
|
2021-05-25 18:18:06 +00:00
|
|
|
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_init(&session->frwd_lock);
|
|
|
|
spin_lock_init(&session->back_lock);
|
2006-04-07 02:13:41 +00:00
|
|
|
|
|
|
|
/* initialize SCSI PDU commands pool */
|
|
|
|
if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
|
|
|
|
(void***)&session->cmds,
|
2008-05-21 20:54:09 +00:00
|
|
|
cmd_task_size + sizeof(struct iscsi_task)))
|
2006-04-07 02:13:41 +00:00
|
|
|
goto cmdpool_alloc_fail;
|
|
|
|
|
|
|
|
/* pre-format cmds pool with ITT */
|
|
|
|
for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
|
2008-05-21 20:54:09 +00:00
|
|
|
struct iscsi_task *task = session->cmds[cmd_i];
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2008-05-21 20:54:09 +00:00
|
|
|
if (cmd_task_size)
|
|
|
|
task->dd_data = &task[1];
|
|
|
|
task->itt = cmd_i;
|
2009-05-13 22:57:46 +00:00
|
|
|
task->state = ISCSI_TASK_FREE;
|
2008-05-21 20:54:09 +00:00
|
|
|
INIT_LIST_HEAD(&task->running);
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
|
|
|
|
2006-06-28 17:00:27 +00:00
|
|
|
if (!try_module_get(iscsit->owner))
|
2008-05-21 20:53:59 +00:00
|
|
|
goto module_get_fail;
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2008-05-21 20:54:12 +00:00
|
|
|
if (iscsi_add_session(cls_session, id))
|
2008-05-21 20:53:59 +00:00
|
|
|
goto cls_session_fail;
|
2008-09-24 16:46:10 +00:00
|
|
|
|
2006-04-07 02:13:41 +00:00
|
|
|
return cls_session;
|
|
|
|
|
|
|
|
cls_session_fail:
|
2008-05-21 20:53:59 +00:00
|
|
|
module_put(iscsit->owner);
|
|
|
|
module_get_fail:
|
2007-12-13 18:43:25 +00:00
|
|
|
iscsi_pool_free(&session->cmdpool);
|
2006-04-07 02:13:41 +00:00
|
|
|
cmdpool_alloc_fail:
|
2008-05-21 20:53:59 +00:00
|
|
|
iscsi_free_session(cls_session);
|
2008-09-24 16:46:10 +00:00
|
|
|
dec_session_count:
|
|
|
|
iscsi_host_dec_session_cnt(shost);
|
2006-04-07 02:13:41 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_session_setup);
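/*
 * Example (not part of libiscsi): a minimal sketch, under stated
 * assumptions, of how a software iscsi_transport might call
 * iscsi_session_setup().  struct my_session, struct my_task and
 * my_transport are hypothetical LLD-side names; cmds_max counts both
 * SCSI and management tasks, ISCSI_MGMT_CMDS_MAX of which are reserved
 * by libiscsi itself for nops and login/logout.
 */
struct my_session { u32 flags; };		/* hypothetical per-session LLD data */
struct my_task { void *pdu; };			/* hypothetical per-task LLD data */
static struct iscsi_transport my_transport;	/* would normally be fully populated */

static struct iscsi_cls_session *
my_create_session(struct Scsi_Host *shost, uint16_t cmds_max,
		  uint32_t initial_cmdsn, unsigned int target_id)
{
	/* dd_size sizes the per-session LLD area, cmd_task_size the per-task one */
	return iscsi_session_setup(&my_transport, shost, cmds_max,
				   sizeof(struct my_session),
				   sizeof(struct my_task),
				   initial_cmdsn, target_id);
}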
|
|
|
|
|
2023-01-17 19:39:36 +00:00
|
|
|
/*
|
|
|
|
* iscsi_session_remove - Remove session from iSCSI class.
|
2008-05-21 20:53:59 +00:00
|
|
|
*/
|
2023-01-17 19:39:36 +00:00
|
|
|
void iscsi_session_remove(struct iscsi_cls_session *cls_session)
|
2006-04-07 02:13:41 +00:00
|
|
|
{
|
2008-05-21 20:53:59 +00:00
|
|
|
struct iscsi_session *session = cls_session->dd_data;
|
2008-09-24 16:46:10 +00:00
|
|
|
struct Scsi_Host *shost = session->host;
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2017-07-13 16:11:21 +00:00
|
|
|
iscsi_remove_session(cls_session);
|
2023-01-17 19:39:36 +00:00
|
|
|
/*
|
|
|
|
* host removal only has to wait for its children to be removed from
|
|
|
|
* sysfs, and iscsi_tcp needs to do iscsi_host_remove before freeing
|
|
|
|
* the session, so drop the session count here.
|
|
|
|
*/
|
|
|
|
iscsi_host_dec_session_cnt(shost);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_session_remove);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* iscsi_session_free - Free iscsi session and its resources
|
|
|
|
* @cls_session: iscsi session
|
|
|
|
*/
|
|
|
|
void iscsi_session_free(struct iscsi_cls_session *cls_session)
|
|
|
|
{
|
|
|
|
struct iscsi_session *session = cls_session->dd_data;
|
|
|
|
struct module *owner = cls_session->transport->owner;
|
2017-07-13 16:11:21 +00:00
|
|
|
|
2021-05-25 18:18:11 +00:00
|
|
|
iscsi_pool_free(&session->cmdpool);
|
2007-05-30 17:57:16 +00:00
|
|
|
kfree(session->password);
|
|
|
|
kfree(session->password_in);
|
|
|
|
kfree(session->username);
|
|
|
|
kfree(session->username_in);
|
2006-07-24 20:47:50 +00:00
|
|
|
kfree(session->targetname);
|
2012-01-19 11:06:53 +00:00
|
|
|
kfree(session->targetalias);
|
2008-05-21 20:54:16 +00:00
|
|
|
kfree(session->initiatorname);
|
2013-06-20 17:21:26 +00:00
|
|
|
kfree(session->boot_root);
|
|
|
|
kfree(session->boot_nic);
|
|
|
|
kfree(session->boot_target);
|
2008-05-21 20:54:16 +00:00
|
|
|
kfree(session->ifacename);
|
2013-07-01 09:54:12 +00:00
|
|
|
kfree(session->portal_type);
|
|
|
|
kfree(session->discovery_parent_type);
|
2006-07-24 20:47:50 +00:00
|
|
|
|
2017-07-13 16:11:21 +00:00
|
|
|
iscsi_free_session(cls_session);
|
2006-07-24 20:47:29 +00:00
|
|
|
module_put(owner);
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
2023-01-17 19:39:36 +00:00
|
|
|
EXPORT_SYMBOL_GPL(iscsi_session_free);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* iscsi_session_teardown - destroy session and cls_session
|
|
|
|
* @cls_session: iscsi session
|
|
|
|
*/
|
|
|
|
void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
|
|
|
|
{
|
|
|
|
iscsi_session_remove(cls_session);
|
|
|
|
iscsi_session_free(cls_session);
|
|
|
|
}
|
2006-04-07 02:13:41 +00:00
|
|
|
EXPORT_SYMBOL_GPL(iscsi_session_teardown);
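/*
 * Example (not part of libiscsi): a sketch of why iscsi_session_remove()
 * and iscsi_session_free() are exported separately.  A transport that
 * allocates one session per scsi_host can tear the host down in between,
 * as the comment in iscsi_session_remove() describes.  iscsi_host_remove()
 * is assumed here to take the (shost, is_shutdown) form; my_* names are
 * hypothetical.
 */
static void my_destroy_session(struct iscsi_cls_session *cls_session)
{
	struct iscsi_session *session = cls_session->dd_data;
	struct Scsi_Host *shost = session->host;

	iscsi_session_remove(cls_session);
	/* the host and its children go away before the session memory does */
	iscsi_host_remove(shost, false);
	iscsi_session_free(cls_session);
	iscsi_host_free(shost);
}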
|
|
|
|
|
|
|
|
/**
|
|
|
|
* iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
|
|
|
|
* @cls_session: iscsi_cls_session
|
2008-05-21 20:54:01 +00:00
|
|
|
* @dd_size: private driver data size
|
2006-04-07 02:13:41 +00:00
|
|
|
* @conn_idx: cid
|
2008-05-21 20:54:01 +00:00
|
|
|
*/
|
2006-04-07 02:13:41 +00:00
|
|
|
struct iscsi_cls_conn *
|
2008-05-21 20:54:01 +00:00
|
|
|
iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
|
|
|
|
uint32_t conn_idx)
|
2006-04-07 02:13:41 +00:00
|
|
|
{
|
2008-05-21 20:53:59 +00:00
|
|
|
struct iscsi_session *session = cls_session->dd_data;
|
2006-04-07 02:13:41 +00:00
|
|
|
struct iscsi_conn *conn;
|
|
|
|
struct iscsi_cls_conn *cls_conn;
|
2006-05-19 01:31:34 +00:00
|
|
|
char *data;
|
2022-03-10 01:57:58 +00:00
|
|
|
int err;
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2022-03-10 01:57:58 +00:00
|
|
|
cls_conn = iscsi_alloc_conn(cls_session, sizeof(*conn) + dd_size,
|
2008-05-21 20:54:01 +00:00
|
|
|
conn_idx);
|
2006-04-07 02:13:41 +00:00
|
|
|
if (!cls_conn)
|
|
|
|
return NULL;
|
|
|
|
conn = cls_conn->dd_data;
|
|
|
|
|
2008-05-21 20:54:01 +00:00
|
|
|
conn->dd_data = cls_conn->dd_data + sizeof(*conn);
|
2006-04-07 02:13:41 +00:00
|
|
|
conn->session = session;
|
|
|
|
conn->cls_conn = cls_conn;
|
|
|
|
conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
|
|
|
|
conn->id = conn_idx;
|
|
|
|
conn->exp_statsn = 0;
|
2007-12-13 18:43:30 +00:00
|
|
|
|
2017-10-11 23:25:40 +00:00
|
|
|
timer_setup(&conn->transport_timer, iscsi_check_transport_timeouts, 0);
|
2007-12-13 18:43:30 +00:00
|
|
|
|
2007-12-13 18:43:20 +00:00
|
|
|
INIT_LIST_HEAD(&conn->mgmtqueue);
|
2009-05-13 22:57:46 +00:00
|
|
|
INIT_LIST_HEAD(&conn->cmdqueue);
|
2007-12-13 18:43:20 +00:00
|
|
|
INIT_LIST_HEAD(&conn->requeue);
|
2006-11-22 14:57:56 +00:00
|
|
|
INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2008-05-21 20:54:09 +00:00
|
|
|
/* allocate login_task used for the login/text sequences */
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2009-12-21 22:37:28 +00:00
|
|
|
if (!kfifo_out(&session->cmdpool.queue,
|
2008-05-21 20:54:09 +00:00
|
|
|
(void*)&conn->login_task,
|
2006-04-07 02:13:41 +00:00
|
|
|
sizeof(void*))) {
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2008-05-21 20:54:09 +00:00
|
|
|
goto login_task_alloc_fail;
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2008-12-02 06:32:09 +00:00
|
|
|
data = (char *) __get_free_pages(GFP_KERNEL,
|
|
|
|
get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
|
2006-05-19 01:31:34 +00:00
|
|
|
if (!data)
|
2008-05-21 20:54:09 +00:00
|
|
|
goto login_task_data_alloc_fail;
|
|
|
|
conn->login_task->data = conn->data = data;
|
2006-05-19 01:31:34 +00:00
|
|
|
|
2022-03-10 01:57:58 +00:00
|
|
|
err = iscsi_add_conn(cls_conn);
|
|
|
|
if (err)
|
|
|
|
goto login_task_add_dev_fail;
|
|
|
|
|
2006-04-07 02:13:41 +00:00
|
|
|
return cls_conn;
|
|
|
|
|
2022-03-10 01:57:58 +00:00
|
|
|
login_task_add_dev_fail:
|
|
|
|
free_pages((unsigned long) conn->data,
|
|
|
|
get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
|
|
|
|
|
2008-05-21 20:54:09 +00:00
|
|
|
login_task_data_alloc_fail:
|
2009-12-21 22:37:28 +00:00
|
|
|
kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
|
2006-05-19 01:31:34 +00:00
|
|
|
sizeof(void*));
|
2008-05-21 20:54:09 +00:00
|
|
|
login_task_alloc_fail:
|
2022-03-10 01:57:58 +00:00
|
|
|
iscsi_put_conn(cls_conn);
|
2006-04-07 02:13:41 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_conn_setup);
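/*
 * Example (not part of libiscsi): a sketch of connection creation for the
 * session built above.  struct my_conn is a hypothetical per-connection
 * LLD structure; cls_conn->dd_data starts with struct iscsi_conn and
 * conn->dd_data points at the LLD-private area that follows it.
 */
struct my_conn { void *sock; };		/* hypothetical LLD connection data */

static struct iscsi_cls_conn *
my_create_conn(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
{
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_conn *conn;
	struct my_conn *my_conn;

	cls_conn = iscsi_conn_setup(cls_session, sizeof(struct my_conn),
				    conn_idx);
	if (!cls_conn)
		return NULL;

	conn = cls_conn->dd_data;
	my_conn = conn->dd_data;	/* LLD area follows struct iscsi_conn */
	my_conn->sock = NULL;		/* filled in later, e.g. at bind time */
	return cls_conn;
}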
|
|
|
|
|
|
|
|
/**
|
|
|
|
* iscsi_conn_teardown - teardown iscsi connection
|
2017-12-22 22:08:27 +00:00
|
|
|
* @cls_conn: iscsi class connection
|
2006-04-07 02:13:41 +00:00
|
|
|
*
|
|
|
|
* TODO: we may need to make this into a two step process
|
|
|
|
* like scsi-mls remove + put host
|
|
|
|
*/
|
|
|
|
void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
|
|
|
|
{
|
|
|
|
struct iscsi_conn *conn = cls_conn->dd_data;
|
|
|
|
struct iscsi_session *session = conn->session;
|
2022-03-10 01:57:59 +00:00
|
|
|
|
|
|
|
iscsi_remove_conn(cls_conn);
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2007-12-13 18:43:30 +00:00
|
|
|
del_timer_sync(&conn->transport_timer);
|
|
|
|
|
2015-06-24 01:11:58 +00:00
|
|
|
mutex_lock(&session->eh_mutex);
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2006-04-07 02:13:41 +00:00
|
|
|
conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
|
|
|
|
if (session->leadconn == conn) {
|
|
|
|
/*
|
|
|
|
* leading connection? then give up on recovery.
|
|
|
|
*/
|
|
|
|
session->state = ISCSI_STATE_TERMINATE;
|
2021-05-25 18:18:06 +00:00
|
|
|
wake_up(&session->ehwait);
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2007-02-28 23:32:15 +00:00
|
|
|
/* flush queued up work because we free the connection below */
|
2007-12-13 18:43:20 +00:00
|
|
|
iscsi_suspend_tx(conn);
|
2007-02-28 23:32:15 +00:00
|
|
|
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2008-12-02 06:32:09 +00:00
|
|
|
free_pages((unsigned long) conn->data,
|
|
|
|
get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
|
2022-03-10 01:57:59 +00:00
|
|
|
kfree(conn->persistent_address);
|
|
|
|
kfree(conn->local_ipaddr);
|
2014-02-07 06:41:38 +00:00
|
|
|
/* regular RX path uses back_lock */
|
|
|
|
spin_lock_bh(&session->back_lock);
|
2009-12-21 22:37:28 +00:00
|
|
|
kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
|
2006-04-07 02:13:41 +00:00
|
|
|
sizeof(void*));
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->back_lock);
|
2007-07-26 17:46:48 +00:00
|
|
|
if (session->leadconn == conn)
|
2006-04-07 02:13:41 +00:00
|
|
|
session->leadconn = NULL;
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2015-06-24 01:11:58 +00:00
|
|
|
mutex_unlock(&session->eh_mutex);
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2022-03-10 01:57:59 +00:00
|
|
|
iscsi_put_conn(cls_conn);
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
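/*
 * Example (not part of libiscsi): the lock ordering the frwd_lock/back_lock
 * split relies on, in sketch form.  As in iscsi_conn_teardown() above, the
 * forward (TX-side) lock may nest around the backward (RX/error-path) lock,
 * never the other way round; RX code that needs frwd_lock must drop
 * back_lock first.
 */
static void my_lock_ordering(struct iscsi_session *session)
{
	spin_lock_bh(&session->frwd_lock);	/* cmdsn, queued_cmdsn, kfifo_out */
	spin_lock_bh(&session->back_lock);	/* exp/max cmdsn, kfifo_in */
	spin_unlock_bh(&session->back_lock);
	spin_unlock_bh(&session->frwd_lock);
}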
|
|
|
|
|
|
|
|
int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
|
|
|
|
{
|
|
|
|
struct iscsi_conn *conn = cls_conn->dd_data;
|
|
|
|
struct iscsi_session *session = conn->session;
|
|
|
|
|
2006-08-31 22:09:24 +00:00
|
|
|
if (!session) {
|
2008-01-31 19:36:52 +00:00
|
|
|
iscsi_conn_printk(KERN_ERR, conn,
|
|
|
|
"can't start unbound connection\n");
|
2006-04-07 02:13:41 +00:00
|
|
|
return -EPERM;
|
|
|
|
}
|
|
|
|
|
2006-08-31 22:09:31 +00:00
|
|
|
if ((session->imm_data_en || !session->initial_r2t_en) &&
|
|
|
|
session->first_burst > session->max_burst) {
|
2008-01-31 19:36:52 +00:00
|
|
|
iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: "
|
|
|
|
"first_burst %d max_burst %d\n",
|
|
|
|
session->first_burst, session->max_burst);
|
2006-08-31 22:09:24 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2007-12-13 18:43:30 +00:00
|
|
|
if (conn->ping_timeout && !conn->recv_timeout) {
|
2008-01-31 19:36:52 +00:00
|
|
|
iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of "
|
|
|
|
"zero. Using 5 seconds\n.");
|
2007-12-13 18:43:30 +00:00
|
|
|
conn->recv_timeout = 5;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (conn->recv_timeout && !conn->ping_timeout) {
|
2008-01-31 19:36:52 +00:00
|
|
|
iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of "
|
|
|
|
"zero. Using 5 seconds.\n");
|
2007-12-13 18:43:30 +00:00
|
|
|
conn->ping_timeout = 5;
|
|
|
|
}
|
|
|
|
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2006-04-07 02:13:41 +00:00
|
|
|
conn->c_stage = ISCSI_CONN_STARTED;
|
|
|
|
session->state = ISCSI_STATE_LOGGED_IN;
|
2007-07-26 17:46:48 +00:00
|
|
|
session->queued_cmdsn = session->cmdsn;
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2007-12-13 18:43:30 +00:00
|
|
|
conn->last_recv = jiffies;
|
|
|
|
conn->last_ping = jiffies;
|
|
|
|
if (conn->recv_timeout && conn->ping_timeout)
|
|
|
|
mod_timer(&conn->transport_timer,
|
|
|
|
jiffies + (conn->recv_timeout * HZ));
|
|
|
|
|
2006-04-07 02:13:41 +00:00
|
|
|
switch(conn->stop_stage) {
|
|
|
|
case STOP_CONN_RECOVER:
|
|
|
|
/*
|
|
|
|
* unblock eh_abort() if it is blocked. re-try all
|
|
|
|
* commands after successful recovery
|
|
|
|
*/
|
|
|
|
conn->stop_stage = 0;
|
2021-05-25 18:18:06 +00:00
|
|
|
session->tmf_state = TMF_INITIAL;
|
2006-04-07 02:13:41 +00:00
|
|
|
session->age++;
|
2008-01-31 19:36:53 +00:00
|
|
|
if (session->age == 16)
|
|
|
|
session->age = 0;
|
2008-01-31 19:36:43 +00:00
|
|
|
break;
|
2006-04-07 02:13:41 +00:00
|
|
|
case STOP_CONN_TERM:
|
|
|
|
conn->stop_stage = 0;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2008-05-21 20:53:59 +00:00
|
|
|
iscsi_unblock_session(session->cls_session);
|
2021-05-25 18:18:06 +00:00
|
|
|
wake_up(&session->ehwait);
|
2006-04-07 02:13:41 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_conn_start);
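/*
 * Example (not part of libiscsi): the order a transport is expected to
 * follow before iscsi_conn_start(), given the "can't start unbound
 * connection" check above.  iscsi_conn_bind() is assumed to have the
 * usual (cls_session, cls_conn, is_leading) form; parameter negotiation
 * through the transport's set_param hooks is elided.
 */
static int my_start_leading_conn(struct iscsi_cls_session *cls_session,
				 struct iscsi_cls_conn *cls_conn)
{
	int err;

	/* binding attaches the connection to its session (and leadconn) */
	err = iscsi_conn_bind(cls_session, cls_conn, 1 /* is_leading */);
	if (err)
		return err;
	return iscsi_conn_start(cls_conn);
}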
|
|
|
|
|
|
|
|
static void
|
2009-05-13 22:57:46 +00:00
|
|
|
fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
|
2006-04-07 02:13:41 +00:00
|
|
|
{
|
2009-05-13 22:57:46 +00:00
|
|
|
struct iscsi_task *task;
|
2009-05-13 22:57:49 +00:00
|
|
|
int i, state;
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2009-05-13 22:57:46 +00:00
|
|
|
for (i = 0; i < conn->session->cmds_max; i++) {
|
|
|
|
task = conn->session->cmds[i];
|
|
|
|
if (task->sc)
|
|
|
|
continue;
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2009-05-13 22:57:46 +00:00
|
|
|
if (task->state == ISCSI_TASK_FREE)
|
|
|
|
continue;
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2009-05-13 22:57:46 +00:00
|
|
|
ISCSI_DBG_SESSION(conn->session,
|
|
|
|
"failing mgmt itt 0x%x state %d\n",
|
|
|
|
task->itt, task->state);
|
2021-02-07 04:46:01 +00:00
|
|
|
|
|
|
|
spin_lock_bh(&session->back_lock);
|
|
|
|
if (cleanup_queued_task(task)) {
|
|
|
|
spin_unlock_bh(&session->back_lock);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2009-05-13 22:57:49 +00:00
|
|
|
state = ISCSI_TASK_ABRT_SESS_RECOV;
|
|
|
|
if (task->state == ISCSI_TASK_PENDING)
|
|
|
|
state = ISCSI_TASK_COMPLETED;
|
|
|
|
iscsi_complete_task(task, state);
|
2019-02-25 17:41:30 +00:00
|
|
|
spin_unlock_bh(&session->back_lock);
|
2009-05-13 22:57:46 +00:00
|
|
|
}
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
|
|
|
|
2021-04-06 17:17:46 +00:00
|
|
|
void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
|
2006-04-07 02:13:41 +00:00
|
|
|
{
|
2021-04-06 17:17:46 +00:00
|
|
|
struct iscsi_conn *conn = cls_conn->dd_data;
|
|
|
|
struct iscsi_session *session = conn->session;
|
2006-05-03 00:46:40 +00:00
|
|
|
int old_stop_stage;
|
|
|
|
|
2007-08-15 06:38:30 +00:00
|
|
|
mutex_lock(&session->eh_mutex);
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2006-05-03 00:46:40 +00:00
|
|
|
if (conn->stop_stage == STOP_CONN_TERM) {
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2007-08-15 06:38:30 +00:00
|
|
|
mutex_unlock(&session->eh_mutex);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2006-05-03 00:46:40 +00:00
|
|
|
/*
|
|
|
|
* When this is called for the in_login state, we only want to clean
|
2008-05-21 20:54:09 +00:00
|
|
|
* up the login task and connection. We do not need to block and set
|
2006-05-30 05:37:20 +00:00
|
|
|
* the recovery state again
|
2006-05-03 00:46:40 +00:00
|
|
|
*/
|
2006-05-30 05:37:20 +00:00
|
|
|
if (flag == STOP_CONN_TERM)
|
|
|
|
session->state = ISCSI_STATE_TERMINATE;
|
|
|
|
else if (conn->stop_stage != STOP_CONN_RECOVER)
|
|
|
|
session->state = ISCSI_STATE_IN_RECOVERY;
|
2010-03-09 20:14:51 +00:00
|
|
|
|
|
|
|
old_stop_stage = conn->stop_stage;
|
|
|
|
conn->stop_stage = flag;
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2006-05-03 00:46:40 +00:00
|
|
|
|
2009-05-13 22:57:43 +00:00
|
|
|
del_timer_sync(&conn->transport_timer);
|
|
|
|
iscsi_suspend_tx(conn);
|
|
|
|
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2006-05-30 05:37:20 +00:00
|
|
|
conn->c_stage = ISCSI_CONN_STOPPED;
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2007-08-15 06:38:30 +00:00
|
|
|
|
2006-04-07 02:13:41 +00:00
|
|
|
/*
|
|
|
|
* for connection level recovery we should not calculate
|
|
|
|
* header digest. conn->hdr_size is used for optimization
|
|
|
|
* in hdr_extract() and will be re-negotiated at
|
|
|
|
* set_param() time.
|
|
|
|
*/
|
|
|
|
if (flag == STOP_CONN_RECOVER) {
|
|
|
|
conn->hdrdgst_en = 0;
|
|
|
|
conn->datadgst_en = 0;
|
2006-05-19 01:31:42 +00:00
|
|
|
if (session->state == ISCSI_STATE_IN_RECOVERY &&
|
2006-05-30 05:37:20 +00:00
|
|
|
old_stop_stage != STOP_CONN_RECOVER) {
|
2009-03-05 20:45:58 +00:00
|
|
|
ISCSI_DBG_SESSION(session, "blocking session\n");
|
2008-05-21 20:53:59 +00:00
|
|
|
iscsi_block_session(session->cls_session);
|
2006-05-30 05:37:20 +00:00
|
|
|
}
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
2006-05-19 01:31:42 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* flush queues.
|
|
|
|
*/
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2009-05-13 22:57:49 +00:00
|
|
|
fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
|
2009-05-13 22:57:46 +00:00
|
|
|
fail_mgmt_tasks(session, conn);
|
2021-05-25 18:18:06 +00:00
|
|
|
memset(&session->tmhdr, 0, sizeof(session->tmhdr));
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2007-08-15 06:38:30 +00:00
|
|
|
mutex_unlock(&session->eh_mutex);
|
2006-04-07 02:13:41 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_conn_stop);
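As a hedged illustration of the frwd_lock/back_lock hierarchy described in the locking-contention commit message above (the helper below is hypothetical and not part of libiscsi), a path that must touch both submit-side and completion-side state nests the locks in this order, never the reverse:

/* Sketch only: back_lock may nest inside frwd_lock, not vice versa. */
static void example_nested_locking(struct iscsi_session *session)
{
	spin_lock_bh(&session->frwd_lock);	/* TX-side state: cmdsn, queued_cmdsn */

	spin_lock_bh(&session->back_lock);	/* RX-side state: commands' pool */
	/* ... return a task to the commands' pool with kfifo_in here ... */
	spin_unlock_bh(&session->back_lock);

	spin_unlock_bh(&session->frwd_lock);
}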
|
|
|
|
|
|
|
|
int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
|
|
|
|
struct iscsi_cls_conn *cls_conn, int is_leading)
|
|
|
|
{
|
2008-05-21 20:53:59 +00:00
|
|
|
struct iscsi_session *session = cls_session->dd_data;
|
2006-10-16 22:09:39 +00:00
|
|
|
struct iscsi_conn *conn = cls_conn->dd_data;
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_lock_bh(&session->frwd_lock);
|
2006-04-07 02:13:41 +00:00
|
|
|
if (is_leading)
|
|
|
|
session->leadconn = conn;
|
2022-04-08 00:13:12 +00:00
|
|
|
|
|
|
|
set_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
|
2014-02-07 06:41:38 +00:00
|
|
|
spin_unlock_bh(&session->frwd_lock);
|
2006-04-07 02:13:41 +00:00
|
|
|
|
2021-02-07 04:46:06 +00:00
|
|
|
/*
|
|
|
|
* The target could have reduced its window size between logins, so
|
|
|
|
* we have to reset max/exp cmdsn so we can see the new values.
|
|
|
|
*/
|
|
|
|
spin_lock_bh(&session->back_lock);
|
|
|
|
session->max_cmdsn = session->exp_cmdsn = session->cmdsn + 1;
|
|
|
|
spin_unlock_bh(&session->back_lock);
|
2006-04-07 02:13:41 +00:00
|
|
|
/*
|
|
|
|
* Unblock xmitworker() so the Login Phase will pass through.
|
|
|
|
*/
|
2022-04-08 00:13:11 +00:00
|
|
|
clear_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
|
|
|
|
clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
|
2006-04-07 02:13:41 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_conn_bind);
|
|
|
|
|
2013-03-22 11:41:30 +00:00
|
|
|
int iscsi_switch_str_param(char **param, char *new_val_buf)
|
2009-05-13 22:57:40 +00:00
|
|
|
{
|
|
|
|
char *new_val;
|
|
|
|
|
|
|
|
if (*param) {
|
|
|
|
if (!strcmp(*param, new_val_buf))
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
new_val = kstrdup(new_val_buf, GFP_NOIO);
|
|
|
|
if (!new_val)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
kfree(*param);
|
|
|
|
*param = new_val;
|
|
|
|
return 0;
|
|
|
|
}
|
2013-03-22 11:41:30 +00:00
|
|
|
EXPORT_SYMBOL_GPL(iscsi_switch_str_param);
|
2006-06-28 17:00:23 +00:00
|
|
|
|
|
|
|
int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
|
|
|
|
enum iscsi_param param, char *buf, int buflen)
|
|
|
|
{
|
|
|
|
struct iscsi_conn *conn = cls_conn->dd_data;
|
|
|
|
struct iscsi_session *session = conn->session;
|
2013-08-08 10:44:29 +00:00
|
|
|
int val;
|
2006-06-28 17:00:23 +00:00
|
|
|
|
|
|
|
switch(param) {
|
2007-12-13 18:43:20 +00:00
|
|
|
case ISCSI_PARAM_FAST_ABORT:
|
|
|
|
sscanf(buf, "%d", &session->fast_abort);
|
|
|
|
break;
|
2007-12-13 18:43:30 +00:00
|
|
|
case ISCSI_PARAM_ABORT_TMO:
|
|
|
|
sscanf(buf, "%d", &session->abort_timeout);
|
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_LU_RESET_TMO:
|
|
|
|
sscanf(buf, "%d", &session->lu_reset_timeout);
|
|
|
|
break;
|
2009-11-11 22:34:33 +00:00
|
|
|
case ISCSI_PARAM_TGT_RESET_TMO:
|
|
|
|
sscanf(buf, "%d", &session->tgt_reset_timeout);
|
|
|
|
break;
|
2007-12-13 18:43:30 +00:00
|
|
|
case ISCSI_PARAM_PING_TMO:
|
|
|
|
sscanf(buf, "%d", &conn->ping_timeout);
|
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_RECV_TMO:
|
|
|
|
sscanf(buf, "%d", &conn->recv_timeout);
|
|
|
|
break;
|
2006-06-28 17:00:23 +00:00
|
|
|
case ISCSI_PARAM_MAX_RECV_DLENGTH:
|
|
|
|
sscanf(buf, "%d", &conn->max_recv_dlength);
|
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_MAX_XMIT_DLENGTH:
|
|
|
|
sscanf(buf, "%d", &conn->max_xmit_dlength);
|
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_HDRDGST_EN:
|
|
|
|
sscanf(buf, "%d", &conn->hdrdgst_en);
|
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_DATADGST_EN:
|
|
|
|
sscanf(buf, "%d", &conn->datadgst_en);
|
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_INITIAL_R2T_EN:
|
|
|
|
sscanf(buf, "%d", &session->initial_r2t_en);
|
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_MAX_R2T:
|
2012-01-27 03:13:10 +00:00
|
|
|
sscanf(buf, "%hu", &session->max_r2t);
|
2006-06-28 17:00:23 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_IMM_DATA_EN:
|
|
|
|
sscanf(buf, "%d", &session->imm_data_en);
|
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_FIRST_BURST:
|
|
|
|
sscanf(buf, "%d", &session->first_burst);
|
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_MAX_BURST:
|
|
|
|
sscanf(buf, "%d", &session->max_burst);
|
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_PDU_INORDER_EN:
|
|
|
|
sscanf(buf, "%d", &session->pdu_inorder_en);
|
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_DATASEQ_INORDER_EN:
|
|
|
|
sscanf(buf, "%d", &session->dataseq_inorder_en);
|
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_ERL:
|
|
|
|
sscanf(buf, "%d", &session->erl);
|
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_EXP_STATSN:
|
|
|
|
sscanf(buf, "%u", &conn->exp_statsn);
|
|
|
|
break;
|
2007-05-30 17:57:16 +00:00
|
|
|
case ISCSI_PARAM_USERNAME:
|
2009-05-13 22:57:40 +00:00
|
|
|
return iscsi_switch_str_param(&session->username, buf);
|
2007-05-30 17:57:16 +00:00
|
|
|
case ISCSI_PARAM_USERNAME_IN:
|
2009-05-13 22:57:40 +00:00
|
|
|
return iscsi_switch_str_param(&session->username_in, buf);
|
2007-05-30 17:57:16 +00:00
|
|
|
case ISCSI_PARAM_PASSWORD:
|
2009-05-13 22:57:40 +00:00
|
|
|
return iscsi_switch_str_param(&session->password, buf);
|
2007-05-30 17:57:16 +00:00
|
|
|
case ISCSI_PARAM_PASSWORD_IN:
|
2009-05-13 22:57:40 +00:00
|
|
|
return iscsi_switch_str_param(&session->password_in, buf);
|
2006-06-28 17:00:23 +00:00
|
|
|
case ISCSI_PARAM_TARGET_NAME:
|
2009-05-13 22:57:40 +00:00
|
|
|
return iscsi_switch_str_param(&session->targetname, buf);
|
2012-01-19 11:06:53 +00:00
|
|
|
case ISCSI_PARAM_TARGET_ALIAS:
|
|
|
|
return iscsi_switch_str_param(&session->targetalias, buf);
|
2006-06-28 17:00:23 +00:00
|
|
|
case ISCSI_PARAM_TPGT:
|
|
|
|
sscanf(buf, "%d", &session->tpgt);
|
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_PERSISTENT_PORT:
|
|
|
|
sscanf(buf, "%d", &conn->persistent_port);
|
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_PERSISTENT_ADDRESS:
|
2009-05-13 22:57:40 +00:00
|
|
|
return iscsi_switch_str_param(&conn->persistent_address, buf);
|
2008-05-21 20:54:16 +00:00
|
|
|
case ISCSI_PARAM_IFACE_NAME:
|
2009-05-13 22:57:40 +00:00
|
|
|
return iscsi_switch_str_param(&session->ifacename, buf);
|
2008-05-21 20:54:16 +00:00
|
|
|
case ISCSI_PARAM_INITIATOR_NAME:
|
2009-05-13 22:57:40 +00:00
|
|
|
return iscsi_switch_str_param(&session->initiatorname, buf);
|
2013-06-20 17:21:26 +00:00
|
|
|
case ISCSI_PARAM_BOOT_ROOT:
|
|
|
|
return iscsi_switch_str_param(&session->boot_root, buf);
|
|
|
|
case ISCSI_PARAM_BOOT_NIC:
|
|
|
|
return iscsi_switch_str_param(&session->boot_nic, buf);
|
|
|
|
case ISCSI_PARAM_BOOT_TARGET:
|
|
|
|
return iscsi_switch_str_param(&session->boot_target, buf);
|
2013-07-01 09:54:12 +00:00
|
|
|
case ISCSI_PARAM_PORTAL_TYPE:
|
|
|
|
return iscsi_switch_str_param(&session->portal_type, buf);
|
|
|
|
case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
|
|
|
|
return iscsi_switch_str_param(&session->discovery_parent_type,
|
|
|
|
buf);
|
2013-08-08 10:44:29 +00:00
|
|
|
case ISCSI_PARAM_DISCOVERY_SESS:
|
|
|
|
sscanf(buf, "%d", &val);
|
|
|
|
session->discovery_sess = !!val;
|
|
|
|
break;
|
2013-11-22 10:28:21 +00:00
|
|
|
case ISCSI_PARAM_LOCAL_IPADDR:
|
|
|
|
return iscsi_switch_str_param(&conn->local_ipaddr, buf);
|
2006-06-28 17:00:23 +00:00
|
|
|
default:
|
|
|
|
return -ENOSYS;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_set_param);
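A minimal, hypothetical call site (not taken from an in-tree driver): values reach iscsi_set_param() as NUL-terminated strings, and the generic setter shown above parses buf with sscanf() or duplicates it with kstrdup() rather than indexing by buflen. Enabling header digests on a bound connection would then look roughly like this:

/* Hedged sketch: cls_conn is assumed to be an already bound class connection. */
static void example_enable_hdrdgst(struct iscsi_cls_conn *cls_conn)
{
	if (iscsi_set_param(cls_conn, ISCSI_PARAM_HDRDGST_EN, "1", 2))
		pr_warn("example: could not enable HeaderDigest\n");
}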
|
|
|
|
|
|
|
|
int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
|
|
|
|
enum iscsi_param param, char *buf)
|
|
|
|
{
|
2008-05-21 20:53:59 +00:00
|
|
|
struct iscsi_session *session = cls_session->dd_data;
|
2006-06-28 17:00:23 +00:00
|
|
|
int len;
|
|
|
|
|
|
|
|
switch(param) {
|
2007-12-13 18:43:20 +00:00
|
|
|
case ISCSI_PARAM_FAST_ABORT:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%d\n", session->fast_abort);
|
2007-12-13 18:43:20 +00:00
|
|
|
break;
|
2007-12-13 18:43:30 +00:00
|
|
|
case ISCSI_PARAM_ABORT_TMO:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%d\n", session->abort_timeout);
|
2007-12-13 18:43:30 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_LU_RESET_TMO:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%d\n", session->lu_reset_timeout);
|
2007-12-13 18:43:30 +00:00
|
|
|
break;
|
2009-11-11 22:34:33 +00:00
|
|
|
case ISCSI_PARAM_TGT_RESET_TMO:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%d\n", session->tgt_reset_timeout);
|
2009-11-11 22:34:33 +00:00
|
|
|
break;
|
2006-06-28 17:00:23 +00:00
|
|
|
case ISCSI_PARAM_INITIAL_R2T_EN:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%d\n", session->initial_r2t_en);
|
2006-06-28 17:00:23 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_MAX_R2T:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%hu\n", session->max_r2t);
|
2006-06-28 17:00:23 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_IMM_DATA_EN:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%d\n", session->imm_data_en);
|
2006-06-28 17:00:23 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_FIRST_BURST:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", session->first_burst);
|
2006-06-28 17:00:23 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_MAX_BURST:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", session->max_burst);
|
2006-06-28 17:00:23 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_PDU_INORDER_EN:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%d\n", session->pdu_inorder_en);
|
2006-06-28 17:00:23 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_DATASEQ_INORDER_EN:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%d\n", session->dataseq_inorder_en);
|
2006-06-28 17:00:23 +00:00
|
|
|
break;
|
2013-07-22 11:46:10 +00:00
|
|
|
case ISCSI_PARAM_DEF_TASKMGMT_TMO:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%d\n", session->def_taskmgmt_tmo);
|
2013-07-22 11:46:10 +00:00
|
|
|
break;
|
2006-06-28 17:00:23 +00:00
|
|
|
case ISCSI_PARAM_ERL:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%d\n", session->erl);
|
2006-06-28 17:00:23 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_TARGET_NAME:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%s\n", session->targetname);
|
2006-06-28 17:00:23 +00:00
|
|
|
break;
|
2012-01-19 11:06:53 +00:00
|
|
|
case ISCSI_PARAM_TARGET_ALIAS:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%s\n", session->targetalias);
|
2012-01-19 11:06:53 +00:00
|
|
|
break;
|
2006-06-28 17:00:23 +00:00
|
|
|
case ISCSI_PARAM_TPGT:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%d\n", session->tpgt);
|
2006-06-28 17:00:23 +00:00
|
|
|
break;
|
2007-05-30 17:57:16 +00:00
|
|
|
case ISCSI_PARAM_USERNAME:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%s\n", session->username);
|
2007-05-30 17:57:16 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_USERNAME_IN:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%s\n", session->username_in);
|
2007-05-30 17:57:16 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_PASSWORD:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%s\n", session->password);
|
2007-05-30 17:57:16 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_PASSWORD_IN:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%s\n", session->password_in);
|
2007-05-30 17:57:16 +00:00
|
|
|
break;
|
2008-05-21 20:54:16 +00:00
|
|
|
case ISCSI_PARAM_IFACE_NAME:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%s\n", session->ifacename);
|
2008-05-21 20:54:16 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_INITIATOR_NAME:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%s\n", session->initiatorname);
|
2008-05-21 20:54:16 +00:00
|
|
|
break;
|
2013-06-20 17:21:26 +00:00
|
|
|
case ISCSI_PARAM_BOOT_ROOT:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%s\n", session->boot_root);
|
2013-06-20 17:21:26 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_BOOT_NIC:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%s\n", session->boot_nic);
|
2013-06-20 17:21:26 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_BOOT_TARGET:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%s\n", session->boot_target);
|
2013-07-22 11:46:09 +00:00
|
|
|
break;
|
2013-07-01 09:54:12 +00:00
|
|
|
case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", session->auto_snd_tgt_disable);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_DISCOVERY_SESS:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", session->discovery_sess);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_PORTAL_TYPE:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%s\n", session->portal_type);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_CHAP_AUTH_EN:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", session->chap_auth_en);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", session->discovery_logout_en);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_BIDI_CHAP_EN:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", session->bidi_chap_en);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", session->discovery_auth_optional);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_DEF_TIME2WAIT:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%d\n", session->time2wait);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_DEF_TIME2RETAIN:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%d\n", session->time2retain);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_TSID:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", session->tsid);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_ISID:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%02x%02x%02x%02x%02x%02x\n",
|
2013-07-01 09:54:12 +00:00
|
|
|
session->isid[0], session->isid[1],
|
|
|
|
session->isid[2], session->isid[3],
|
|
|
|
session->isid[4], session->isid[5]);
|
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", session->discovery_parent_idx);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
|
|
|
|
if (session->discovery_parent_type)
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%s\n",
|
2013-07-01 09:54:12 +00:00
|
|
|
session->discovery_parent_type);
|
|
|
|
else
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "\n");
|
2013-06-20 17:21:26 +00:00
|
|
|
break;
|
2006-06-28 17:00:23 +00:00
|
|
|
default:
|
|
|
|
return -ENOSYS;
|
|
|
|
}
|
|
|
|
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_session_get_param);
|
|
|
|
|
2011-02-16 21:04:35 +00:00
|
|
|
int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
|
|
|
|
enum iscsi_param param, char *buf)
|
|
|
|
{
|
|
|
|
struct sockaddr_in6 *sin6 = NULL;
|
|
|
|
struct sockaddr_in *sin = NULL;
|
|
|
|
int len;
|
|
|
|
|
|
|
|
switch (addr->ss_family) {
|
|
|
|
case AF_INET:
|
|
|
|
sin = (struct sockaddr_in *)addr;
|
|
|
|
break;
|
|
|
|
case AF_INET6:
|
|
|
|
sin6 = (struct sockaddr_in6 *)addr;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (param) {
|
|
|
|
case ISCSI_PARAM_CONN_ADDRESS:
|
|
|
|
case ISCSI_HOST_PARAM_IPADDRESS:
|
|
|
|
if (sin)
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%pI4\n", &sin->sin_addr.s_addr);
|
2011-02-16 21:04:35 +00:00
|
|
|
else
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%pI6\n", &sin6->sin6_addr);
|
2011-02-16 21:04:35 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_CONN_PORT:
|
2014-09-29 18:55:42 +00:00
|
|
|
case ISCSI_PARAM_LOCAL_PORT:
|
2011-02-16 21:04:35 +00:00
|
|
|
if (sin)
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%hu\n", be16_to_cpu(sin->sin_port));
|
2011-02-16 21:04:35 +00:00
|
|
|
else
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%hu\n",
|
2011-02-16 21:04:35 +00:00
|
|
|
be16_to_cpu(sin6->sin6_port));
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_conn_get_addr_param);
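A hedged usage sketch (the wrapper is hypothetical): given a sockaddr_storage the transport has already filled in, the helper prints the IPv4 or IPv6 form into a sysfs buffer and returns the length, or -EINVAL for an unsupported family or parameter:

/* Sketch only: addr is assumed to hold the peer address of the connection. */
static int example_show_peer_address(struct sockaddr_storage *addr, char *buf)
{
	return iscsi_conn_get_addr_param(addr, ISCSI_PARAM_CONN_ADDRESS, buf);
}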
|
|
|
|
|
2006-06-28 17:00:23 +00:00
|
|
|
int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
|
|
|
|
enum iscsi_param param, char *buf)
|
|
|
|
{
|
|
|
|
struct iscsi_conn *conn = cls_conn->dd_data;
|
|
|
|
int len;
|
|
|
|
|
|
|
|
switch(param) {
|
2007-12-13 18:43:30 +00:00
|
|
|
case ISCSI_PARAM_PING_TMO:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", conn->ping_timeout);
|
2007-12-13 18:43:30 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_RECV_TMO:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", conn->recv_timeout);
|
2007-12-13 18:43:30 +00:00
|
|
|
break;
|
2006-06-28 17:00:23 +00:00
|
|
|
case ISCSI_PARAM_MAX_RECV_DLENGTH:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", conn->max_recv_dlength);
|
2006-06-28 17:00:23 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_MAX_XMIT_DLENGTH:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", conn->max_xmit_dlength);
|
2006-06-28 17:00:23 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_HDRDGST_EN:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%d\n", conn->hdrdgst_en);
|
2006-06-28 17:00:23 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_DATADGST_EN:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%d\n", conn->datadgst_en);
|
2006-06-28 17:00:23 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_IFMARKER_EN:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%d\n", conn->ifmarker_en);
|
2006-06-28 17:00:23 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_OFMARKER_EN:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%d\n", conn->ofmarker_en);
|
2006-06-28 17:00:23 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_EXP_STATSN:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", conn->exp_statsn);
|
2006-06-28 17:00:23 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_PERSISTENT_PORT:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%d\n", conn->persistent_port);
|
2006-06-28 17:00:23 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_PERSISTENT_ADDRESS:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%s\n", conn->persistent_address);
|
2006-06-28 17:00:23 +00:00
|
|
|
break;
|
2013-07-01 09:54:12 +00:00
|
|
|
case ISCSI_PARAM_STATSN:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", conn->statsn);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_MAX_SEGMENT_SIZE:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", conn->max_segment_size);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_KEEPALIVE_TMO:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", conn->keepalive_tmo);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_LOCAL_PORT:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", conn->local_port);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_stat);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_TCP_NAGLE_DISABLE:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", conn->tcp_nagle_disable);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_TCP_WSF_DISABLE:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", conn->tcp_wsf_disable);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_TCP_TIMER_SCALE:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", conn->tcp_timer_scale);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_TCP_TIMESTAMP_EN:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_en);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", conn->fragment_disable);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_IPV4_TOS:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", conn->ipv4_tos);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_IPV6_TC:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", conn->ipv6_traffic_class);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
2013-07-22 11:46:10 +00:00
|
|
|
case ISCSI_PARAM_IPV6_FLOW_LABEL:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", conn->ipv6_flow_label);
|
2013-07-22 11:46:10 +00:00
|
|
|
break;
|
2013-07-01 09:54:12 +00:00
|
|
|
case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", conn->is_fw_assigned_ipv6);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_TCP_XMIT_WSF:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", conn->tcp_xmit_wsf);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
|
|
|
case ISCSI_PARAM_TCP_RECV_WSF:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%u\n", conn->tcp_recv_wsf);
|
2013-07-01 09:54:12 +00:00
|
|
|
break;
|
2013-11-22 10:28:21 +00:00
|
|
|
case ISCSI_PARAM_LOCAL_IPADDR:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%s\n", conn->local_ipaddr);
|
2013-11-22 10:28:21 +00:00
|
|
|
break;
|
2006-06-28 17:00:23 +00:00
|
|
|
default:
|
|
|
|
return -ENOSYS;
|
|
|
|
}
|
|
|
|
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
|
|
|
|
|
2007-05-30 17:57:12 +00:00
|
|
|
int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
|
|
|
|
char *buf)
|
|
|
|
{
|
2008-05-21 20:53:59 +00:00
|
|
|
struct iscsi_host *ihost = shost_priv(shost);
|
2007-05-30 17:57:12 +00:00
|
|
|
int len;
|
|
|
|
|
|
|
|
switch (param) {
|
[SCSI] iscsi class, iscsi_tcp, iser, qla4xxx: add netdevname sysfs attr
iSCSI must support software iscsi (iscsi_tcp, iser), hardware iscsi (qla4xxx),
and partial offload (broadcom). To allow each stack, driver, or port
(virtual or physical) to log into the same target portal, we use the
initiator tuple [[HWADDRESS | NETDEVNAME], INITIATOR_NAME] and the
target tuple [TARGETNAME, CONN_ADDRESS, CONN_PORT] to identify a session.
This patch adds the netdev name, which is used by software iscsi when
it binds a session to a netdevice using the SO_BINDTODEVICE sock opt.
It cannot use HWADDRESS because with VLANs multiple netdevices share the
same MAC, so the initiator/target id would not be unique.
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Cc: Roland Dreier <rdreier@cisco.com>
Cc: David C Somayajulu <david.somayajulu@qlogic.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
2007-05-30 17:57:25 +00:00
|
|
|
case ISCSI_HOST_PARAM_NETDEV_NAME:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%s\n", ihost->netdev);
|
2007-05-30 17:57:25 +00:00
|
|
|
break;
|
2007-05-30 17:57:12 +00:00
|
|
|
case ISCSI_HOST_PARAM_HWADDRESS:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%s\n", ihost->hwaddress);
|
2007-05-30 17:57:12 +00:00
|
|
|
break;
|
2007-05-30 17:57:13 +00:00
|
|
|
case ISCSI_HOST_PARAM_INITIATOR_NAME:
|
2021-02-24 02:00:17 +00:00
|
|
|
len = sysfs_emit(buf, "%s\n", ihost->initiatorname);
|
2007-05-30 17:57:13 +00:00
|
|
|
break;
|
2007-05-30 17:57:12 +00:00
|
|
|
default:
|
|
|
|
return -ENOSYS;
|
|
|
|
}
|
|
|
|
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_host_get_param);
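To restate the identification tuple from the netdevname commit message above in code form, here is an illustrative structure; libiscsi defines no such type, this is only a sketch of [[HWADDRESS | NETDEVNAME], INITIATOR_NAME] on the initiator side plus [TARGETNAME, CONN_ADDRESS, CONN_PORT] on the target side:

/* Hypothetical, for illustration only -- not a libiscsi structure. */
struct example_session_id {
	char		*hwaddress_or_netdev;	/* hwaddress (HW iSCSI) or netdev name (software iSCSI) */
	char		*initiatorname;
	char		*targetname;
	char		*conn_address;
	unsigned short	 conn_port;
};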
|
|
|
|
|
|
|
|
int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
|
|
|
|
char *buf, int buflen)
|
|
|
|
{
|
2008-05-21 20:53:59 +00:00
|
|
|
struct iscsi_host *ihost = shost_priv(shost);
|
2007-05-30 17:57:12 +00:00
|
|
|
|
|
|
|
switch (param) {
|
2007-05-30 17:57:25 +00:00
|
|
|
case ISCSI_HOST_PARAM_NETDEV_NAME:
|
2009-05-13 22:57:40 +00:00
|
|
|
return iscsi_switch_str_param(&ihost->netdev, buf);
|
2007-05-30 17:57:12 +00:00
|
|
|
case ISCSI_HOST_PARAM_HWADDRESS:
|
2009-05-13 22:57:40 +00:00
|
|
|
return iscsi_switch_str_param(&ihost->hwaddress, buf);
|
2007-05-30 17:57:13 +00:00
|
|
|
case ISCSI_HOST_PARAM_INITIATOR_NAME:
|
2009-05-13 22:57:40 +00:00
|
|
|
return iscsi_switch_str_param(&ihost->initiatorname, buf);
|
2007-05-30 17:57:12 +00:00
|
|
|
default:
|
|
|
|
return -ENOSYS;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iscsi_host_set_param);
|
|
|
|
|
2006-04-07 02:13:41 +00:00
|
|
|
MODULE_AUTHOR("Mike Christie");
|
|
|
|
MODULE_DESCRIPTION("iSCSI library functions");
|
|
|
|
MODULE_LICENSE("GPL");
|