cifs: avoid use of global locks for high contention data

During analysis of multichannel performance, we found that the
global locks cifs_tcp_ses_lock and GlobalMid_Lock, shared across
several data structures, were significant points of contention.

With this change, we break up the use of these locks by
introducing locks at a more granular level: server->srv_lock,
ses->ses_lock and tcon->tc_lock protect the otherwise unprotected
fields of the server, session and tcon structs, and
server->mid_lock protects the mid-related lists and entries at
the server level.

Signed-off-by: Shyam Prasad N <sprasad@microsoft.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
commit d7d7a66aac (parent 1bfa25ee30)
Author: Shyam Prasad N, 2022-07-27 14:49:56 -05:00
Committed by: Steve French
13 changed files with 336 additions and 243 deletions
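Before the diffs, here is a minimal illustrative sketch of the pattern the commit applies; it uses simplified stand-in types and hypothetical helper names (need_reconnect_old/new, init_server_locks), not the actual cifs.ko code. Status checks and mid bookkeeping that used to serialize on the global cifs_tcp_ses_lock / GlobalMid_Lock now take a per-object spinlock that is initialized where the object is allocated.

```c
/* Illustrative sketch only -- simplified stand-ins, not the real cifs.ko types. */
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/types.h>

enum statusEnum { CifsGood, CifsNeedReconnect, CifsExiting };

/* Global lock that the old code shared across every server, session and tcon. */
static DEFINE_SPINLOCK(cifs_tcp_ses_lock);

struct TCP_Server_Info {
	spinlock_t srv_lock;   /* new: guards tcpStatus and other loose fields */
	spinlock_t mid_lock;   /* new: guards pending_mid_q and CurrentMid */
	enum statusEnum tcpStatus;
	u64 CurrentMid;
	struct list_head pending_mid_q;
};

/* Before: even a simple status check took the lock shared by all servers. */
static bool need_reconnect_old(struct TCP_Server_Info *server)
{
	bool ret;

	spin_lock(&cifs_tcp_ses_lock);
	ret = (server->tcpStatus == CifsNeedReconnect);
	spin_unlock(&cifs_tcp_ses_lock);
	return ret;
}

/* After: the same check only contends with users of this one server. */
static bool need_reconnect_new(struct TCP_Server_Info *server)
{
	bool ret;

	spin_lock(&server->srv_lock);
	ret = (server->tcpStatus == CifsNeedReconnect);
	spin_unlock(&server->srv_lock);
	return ret;
}

/* The new locks are initialized when the server object is set up,
 * mirroring what cifs_get_tcp_session() does in the diff below. */
static void init_server_locks(struct TCP_Server_Info *server)
{
	spin_lock_init(&server->srv_lock);
	spin_lock_init(&server->mid_lock);
	INIT_LIST_HEAD(&server->pending_mid_q);
}
```

The diffs below apply essentially this substitution throughout: GlobalMid_Lock becomes server->mid_lock, and cifs_tcp_ses_lock is kept only for the global lists and reference counters it still owns.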


@@ -55,7 +55,7 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
 return;
 cifs_dbg(VFS, "Dump pending requests:\n");
-spin_lock(&GlobalMid_Lock);
+spin_lock(&server->mid_lock);
 list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
 cifs_dbg(VFS, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %llu\n",
 mid_entry->mid_state,
@@ -78,7 +78,7 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
 mid_entry->resp_buf, 62);
 }
 }
-spin_unlock(&GlobalMid_Lock);
+spin_unlock(&server->mid_lock);
 #endif /* CONFIG_CIFS_DEBUG2 */
 }
@@ -463,7 +463,7 @@ skip_rdma:
 seq_printf(m, "\n\t\t[NONE]");
 seq_puts(m, "\n\n\tMIDs: ");
-spin_lock(&GlobalMid_Lock);
+spin_lock(&server->mid_lock);
 list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
 seq_printf(m, "\n\tState: %d com: %d pid:"
 " %d cbdata: %p mid %llu\n",
@@ -473,7 +473,7 @@ skip_rdma:
 mid_entry->callback_data,
 mid_entry->mid);
 }
-spin_unlock(&GlobalMid_Lock);
+spin_unlock(&server->mid_lock);
 seq_printf(m, "\n--\n");
 }
 if (c == 0)


@@ -141,13 +141,13 @@ int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server,
 if ((cifs_pdu == NULL) || (server == NULL))
 return -EINVAL;
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&server->srv_lock);
 if (!(cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) ||
 server->tcpStatus == CifsNeedNegotiate) {
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 return rc;
 }
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 if (!server->session_estab) {
 memcpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8);


@@ -731,14 +731,17 @@ static void cifs_umount_begin(struct super_block *sb)
 tcon = cifs_sb_master_tcon(cifs_sb);
 spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&tcon->tc_lock);
 if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
 /* we have other mounts to same share or we have
 already tried to force umount this and woken up
 all waiting network requests, nothing to do */
+spin_unlock(&tcon->tc_lock);
 spin_unlock(&cifs_tcp_ses_lock);
 return;
 } else if (tcon->tc_count == 1)
 tcon->status = TID_EXITING;
+spin_unlock(&tcon->tc_lock);
 spin_unlock(&cifs_tcp_ses_lock);
 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */


@@ -605,6 +605,7 @@ inc_rfc1001_len(void *buf, int count)
 struct TCP_Server_Info {
 struct list_head tcp_ses_list;
 struct list_head smb_ses_list;
+spinlock_t srv_lock;  /* protect anything here that is not protected */
 __u64 conn_id; /* connection identifier (useful for debugging) */
 int srv_count; /* reference counter */
 /* 15 character server name + 0x20 16th byte indicating type = srv */
@@ -622,6 +623,7 @@ struct TCP_Server_Info {
 #endif
 wait_queue_head_t response_q;
 wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/
+spinlock_t mid_lock;  /* protect mid queue and it's entries */
 struct list_head pending_mid_q;
 bool noblocksnd; /* use blocking sendmsg */
 bool noautotune; /* do not autotune send buf sizes */
@@ -1008,6 +1010,7 @@ struct cifs_ses {
 struct list_head rlist; /* reconnect list */
 struct list_head tcon_list;
 struct cifs_tcon *tcon_ipc;
+spinlock_t ses_lock;  /* protect anything here that is not protected */
 struct mutex session_mutex;
 struct TCP_Server_Info *server; /* pointer to server info */
 int ses_count; /* reference counter */
@@ -1169,6 +1172,7 @@ struct cifs_tcon {
 struct list_head tcon_list;
 int tc_count;
 struct list_head rlist; /* reconnect list */
+spinlock_t tc_lock;  /* protect anything here that is not protected */
 atomic_t num_local_opens; /* num of all opens including disconnected */
 atomic_t num_remote_opens; /* num of all network opens on server */
 struct list_head openFileList;
@@ -1899,33 +1903,78 @@ require use of the stronger protocol */
 */
 /****************************************************************************
- * Locking notes. All updates to global variables and lists should be
- * protected by spinlocks or semaphores.
+ * Here are all the locks (spinlock, mutex, semaphore) in cifs.ko, arranged according
+ * to the locking order. i.e. if two locks are to be held together, the lock that
+ * appears higher in this list needs to be taken before the other.
 *
- * Spinlocks
- * ---------
- * GlobalMid_Lock protects:
- *     list operations on pending_mid_q and oplockQ
- *     updates to XID counters, multiplex id and SMB sequence numbers
- *     list operations on global DnotifyReqList
- *     updates to ses->status and TCP_Server_Info->tcpStatus
- *     updates to server->CurrentMid
- * tcp_ses_lock protects:
- *     list operations on tcp and SMB session lists
- * tcon->open_file_lock protects the list of open files hanging off the tcon
- * inode->open_file_lock protects the openFileList hanging off the inode
- * cfile->file_info_lock protects counters and fields in cifs file struct
- * f_owner.lock protects certain per file struct operations
- * mapping->page_lock protects certain per page operations
+ * If you hold a lock that is lower in this list, and you need to take a higher lock
+ * (or if you think that one of the functions that you're calling may need to), first
+ * drop the lock you hold, pick up the higher lock, then the lower one. This will
+ * ensure that locks are picked up only in one direction in the below table
+ * (top to bottom).
 *
- * Note that the cifs_tcon.open_file_lock should be taken before
- * not after the cifsInodeInfo.open_file_lock
+ * Also, if you expect a function to be called with a lock held, explicitly document
+ * this in the comments on top of your function definition.
 *
- * Semaphores
- * ----------
- * cifsInodeInfo->lock_sem protects:
- *     the list of locks held by the inode
+ * And also, try to keep the critical sections (lock hold time) to be as minimal as
+ * possible. Blocking / calling other functions with a lock held always increase
+ * the risk of a possible deadlock.
 *
+ * Following this rule will avoid unnecessary deadlocks, which can get really hard to
+ * debug. Also, any new lock that you introduce, please add to this list in the correct
+ * order.
+ *
+ * Please populate this list whenever you introduce new locks in your changes. Or in
+ * case I've missed some existing locks. Please ensure that it's added in the list
+ * based on the locking order expected.
+ *
+ * =====================================================================================
+ * Lock                             Protects                         Initialization fn
+ * =====================================================================================
+ * vol_list_lock
+ * vol_info->ctx_lock               vol_info->ctx
+ * cifs_sb_info->tlink_tree_lock    cifs_sb_info->tlink_tree         cifs_setup_cifs_sb
+ * TCP_Server_Info->                TCP_Server_Info                  cifs_get_tcp_session
+ * reconnect_mutex
+ * TCP_Server_Info->srv_mutex       TCP_Server_Info                  cifs_get_tcp_session
+ * cifs_ses->session_mutex          cifs_ses                         sesInfoAlloc
+ *                                  cifs_tcon
+ * cifs_tcon->open_file_lock        cifs_tcon->openFileList          tconInfoAlloc
+ *                                  cifs_tcon->pending_opens
+ * cifs_tcon->stat_lock             cifs_tcon->bytes_read            tconInfoAlloc
+ *                                  cifs_tcon->bytes_written
+ * cifs_tcp_ses_lock                cifs_tcp_ses_list                sesInfoAlloc
+ * GlobalMid_Lock                   GlobalMaxActiveXid               init_cifs
+ *                                  GlobalCurrentXid
+ *                                  GlobalTotalActiveXid
+ * TCP_Server_Info->srv_lock        (anything in struct not protected by another lock and can change)
+ * TCP_Server_Info->mid_lock        TCP_Server_Info->pending_mid_q   cifs_get_tcp_session
+ *                                  ->CurrentMid
+ *                                  (any changes in mid_q_entry fields)
+ * TCP_Server_Info->req_lock        TCP_Server_Info->in_flight       cifs_get_tcp_session
+ *                                  ->credits
+ *                                  ->echo_credits
+ *                                  ->oplock_credits
+ *                                  ->reconnect_instance
+ * cifs_ses->ses_lock               (anything that is not protected by another lock and can change)
+ * cifs_ses->iface_lock             cifs_ses->iface_list             sesInfoAlloc
+ *                                  ->iface_count
+ *                                  ->iface_last_update
+ * cifs_ses->chan_lock              cifs_ses->chans
+ *                                  ->chans_need_reconnect
+ *                                  ->chans_in_reconnect
+ * cifs_tcon->tc_lock               (anything that is not protected by another lock and can change)
+ * cifsInodeInfo->open_file_lock    cifsInodeInfo->openFileList      cifs_alloc_inode
+ * cifsInodeInfo->writers_lock      cifsInodeInfo->writers           cifsInodeInfo_alloc
+ * cifsInodeInfo->lock_sem          cifsInodeInfo->llist             cifs_init_once
+ *                                  ->can_cache_brlcks
+ * cifsInodeInfo->deferred_lock     cifsInodeInfo->deferred_closes   cifsInodeInfo_alloc
+ * cached_fid->fid_mutex            cifs_tcon->crfid                 tconInfoAlloc
+ * cifsFileInfo->fh_mutex           cifsFileInfo                     cifs_new_fileinfo
+ * cifsFileInfo->file_info_lock     cifsFileInfo->count              cifs_new_fileinfo
+ *                                  ->invalidHandle                  initiate_cifs_search
+ *                                  ->oplock_break_cancelled
+ * cifs_aio_ctx->aio_mutex          cifs_aio_ctx                     cifs_aio_ctx_alloc
 ****************************************************************************/
 #ifdef DECLARE_GLOBALS_HERE
@@ -1946,9 +1995,7 @@ extern struct list_head cifs_tcp_ses_list;
 /*
 * This lock protects the cifs_tcp_ses_list, the list of smb sessions per
 * tcp session, and the list of tcon's per smb session. It also protects
- * the reference counters for the server, smb session, and tcon. It also
- * protects some fields in the TCP_Server_Info struct such as dstaddr. Finally,
- * changes to the tcon->tidStatus should be done while holding this lock.
+ * the reference counters for the server, smb session, and tcon.
 * generally the locks should be taken in order tcp_ses_lock before
 * tcon->open_file_lock and that before file->file_info_lock since the
 * structure order is cifs_socket-->cifs_ses-->cifs_tcon-->cifs_file
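The locking-order table added above is easiest to see in cifs_match_super() later in this commit, which nests the new locks in the documented top-to-bottom order and releases them in reverse. Below is a minimal sketch of that discipline using hypothetical simplified structs and a made-up helper name (match_all_locked); it is not the real cifs.ko code.

```c
/* Sketch of the documented lock ordering (assumed simplified structs):
 * cifs_tcp_ses_lock -> srv_lock -> ses_lock -> tc_lock, released in reverse. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(cifs_tcp_ses_lock);

struct srv  { spinlock_t srv_lock; };
struct ses  { spinlock_t ses_lock; };
struct tcon { spinlock_t tc_lock; };

static bool match_all_locked(struct srv *server, struct ses *ses, struct tcon *tcon)
{
	bool ok;

	spin_lock(&cifs_tcp_ses_lock);   /* highest of the four in the table */
	spin_lock(&server->srv_lock);
	spin_lock(&ses->ses_lock);
	spin_lock(&tcon->tc_lock);       /* lowest of the four */

	/* the match_server()/match_session()/match_tcon() checks would run here,
	 * which is why the commit documents them as "called with the lock held" */
	ok = true;

	spin_unlock(&tcon->tc_lock);     /* unlock in reverse order */
	spin_unlock(&ses->ses_lock);
	spin_unlock(&server->srv_lock);
	spin_unlock(&cifs_tcp_ses_lock);
	return ok;
}
```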


@@ -74,13 +74,13 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
 struct list_head *tmp1;
 /* only send once per connect */
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&tcon->ses->ses_lock);
 if ((tcon->ses->ses_status != SES_GOOD) || (tcon->status != TID_NEED_RECON)) {
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&tcon->ses->ses_lock);
 return;
 }
 tcon->status = TID_IN_FILES_INVALIDATE;
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&tcon->ses->ses_lock);
 /* list all files open on tree connection and mark them invalid */
 spin_lock(&tcon->open_file_lock);
@@ -98,10 +98,10 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
 memset(tcon->crfid.fid, 0, sizeof(struct cifs_fid));
 mutex_unlock(&tcon->crfid.fid_mutex);
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&tcon->tc_lock);
 if (tcon->status == TID_IN_FILES_INVALIDATE)
 tcon->status = TID_NEED_TCON;
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&tcon->tc_lock);
 /*
 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
@@ -134,18 +134,18 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
 * only tree disconnect, open, and write, (and ulogoff which does not
 * have tcon) are allowed as we start force umount
 */
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&tcon->tc_lock);
 if (tcon->status == TID_EXITING) {
 if (smb_command != SMB_COM_WRITE_ANDX &&
 smb_command != SMB_COM_OPEN_ANDX &&
 smb_command != SMB_COM_TREE_DISCONNECT) {
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&tcon->tc_lock);
 cifs_dbg(FYI, "can not send cmd %d while umounting\n",
 smb_command);
 return -ENODEV;
 }
 }
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&tcon->tc_lock);
 retries = server->nr_targets;
@@ -165,12 +165,12 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
 }
 /* are we still trying to reconnect? */
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&server->srv_lock);
 if (server->tcpStatus != CifsNeedReconnect) {
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 break;
 }
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 if (retries && --retries)
 continue;
@@ -201,13 +201,13 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
 * and the server never sends an answer the socket will be closed
 * and tcpStatus set to reconnect.
 */
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&server->srv_lock);
 if (server->tcpStatus == CifsNeedReconnect) {
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 rc = -EHOSTDOWN;
 goto out;
 }
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 /*
 * need to prevent multiple threads trying to simultaneously


@@ -119,10 +119,10 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
 goto requeue_resolve;
 }
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&server->srv_lock);
 rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr,
 strlen(ipaddr));
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 kfree(ipaddr);
 /* rc == 1 means success here */
@@ -205,17 +205,22 @@ cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
 /* If server is a channel, select the primary channel */
 pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&pserver->srv_lock);
 if (!all_channels) {
 pserver->tcpStatus = CifsNeedReconnect;
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&pserver->srv_lock);
 return;
 }
+spin_unlock(&pserver->srv_lock);
+spin_lock(&cifs_tcp_ses_lock);
 list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
 spin_lock(&ses->chan_lock);
-for (i = 0; i < ses->chan_count; i++)
+for (i = 0; i < ses->chan_count; i++) {
+spin_lock(&ses->chans[i].server->srv_lock);
 ses->chans[i].server->tcpStatus = CifsNeedReconnect;
+spin_unlock(&ses->chans[i].server->srv_lock);
+}
 spin_unlock(&ses->chan_lock);
 }
 spin_unlock(&cifs_tcp_ses_lock);
@@ -252,17 +257,8 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
 spin_lock(&cifs_tcp_ses_lock);
 list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
 /* check if iface is still active */
-if (!cifs_chan_is_iface_active(ses, server)) {
-/*
-* HACK: drop the lock before calling
-* cifs_chan_update_iface to avoid deadlock
-*/
-ses->ses_count++;
-spin_unlock(&cifs_tcp_ses_lock);
+if (!cifs_chan_is_iface_active(ses, server))
 cifs_chan_update_iface(ses, server);
-spin_lock(&cifs_tcp_ses_lock);
-ses->ses_count--;
-}
 spin_lock(&ses->chan_lock);
 if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server))
@@ -323,7 +319,7 @@ cifs_abort_connection(struct TCP_Server_Info *server)
 /* mark submitted MIDs for retry and issue callback */
 INIT_LIST_HEAD(&retry_list);
 cifs_dbg(FYI, "%s: moving mids to private list\n", __func__);
-spin_lock(&GlobalMid_Lock);
+spin_lock(&server->mid_lock);
 list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
 kref_get(&mid->refcount);
 if (mid->mid_state == MID_REQUEST_SUBMITTED)
@@ -331,7 +327,7 @@ cifs_abort_connection(struct TCP_Server_Info *server)
 list_move(&mid->qhead, &retry_list);
 mid->mid_flags |= MID_DELETED;
 }
-spin_unlock(&GlobalMid_Lock);
+spin_unlock(&server->mid_lock);
 cifs_server_unlock(server);
 cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
@@ -350,11 +346,11 @@ cifs_abort_connection(struct TCP_Server_Info *server)
 static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets)
 {
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&server->srv_lock);
 server->nr_targets = num_targets;
 if (server->tcpStatus == CifsExiting) {
 /* the demux thread will exit normally next time through the loop */
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 wake_up(&server->response_q);
 return false;
 }
@@ -364,7 +360,7 @@ static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num
 server->hostname);
 server->tcpStatus = CifsNeedReconnect;
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 return true;
 }
@@ -414,20 +410,20 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
 } else {
 atomic_inc(&tcpSesReconnectCount);
 set_credits(server, 1);
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&server->srv_lock);
 if (server->tcpStatus != CifsExiting)
 server->tcpStatus = CifsNeedNegotiate;
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 cifs_swn_reset_server_dstaddr(server);
 cifs_server_unlock(server);
 mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
 }
 } while (server->tcpStatus == CifsNeedReconnect);
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&server->srv_lock);
 if (server->tcpStatus == CifsNeedNegotiate)
 mod_delayed_work(cifsiod_wq, &server->echo, 0);
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 wake_up(&server->response_q);
 return rc;
@@ -541,10 +537,10 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
 */
 atomic_inc(&tcpSesReconnectCount);
 set_credits(server, 1);
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&server->srv_lock);
 if (server->tcpStatus != CifsExiting)
 server->tcpStatus = CifsNeedNegotiate;
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 cifs_swn_reset_server_dstaddr(server);
 cifs_server_unlock(server);
 mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
@@ -556,11 +552,10 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
 dfs_cache_free_tgts(&tl);
 /* Need to set up echo worker again once connection has been established */
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&server->srv_lock);
 if (server->tcpStatus == CifsNeedNegotiate)
 mod_delayed_work(cifsiod_wq, &server->echo, 0);
+spin_unlock(&server->srv_lock);
-spin_unlock(&cifs_tcp_ses_lock);
 wake_up(&server->response_q);
 return rc;
@@ -569,12 +564,12 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
 int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
 {
 /* If tcp session is not an dfs connection, then reconnect to last target server */
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&server->srv_lock);
 if (!server->is_dfs_conn) {
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 return __cifs_reconnect(server, mark_smb_session);
 }
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 mutex_lock(&server->refpath_lock);
 if (!server->origin_fullpath || !server->leaf_fullpath) {
@@ -670,18 +665,18 @@ server_unresponsive(struct TCP_Server_Info *server)
 * 65s kernel_recvmsg times out, and we see that we haven't gotten
 * a response in >60s.
 */
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&server->srv_lock);
 if ((server->tcpStatus == CifsGood ||
 server->tcpStatus == CifsNeedNegotiate) &&
 (!server->ops->can_echo || server->ops->can_echo(server)) &&
 time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
 (3 * server->echo_interval) / HZ);
 cifs_reconnect(server, false);
 return true;
 }
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 return false;
 }
@@ -726,18 +721,18 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
 else
 length = sock_recvmsg(server->ssocket, smb_msg, 0);
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&server->srv_lock);
 if (server->tcpStatus == CifsExiting) {
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 return -ESHUTDOWN;
 }
 if (server->tcpStatus == CifsNeedReconnect) {
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 cifs_reconnect(server, false);
 return -ECONNABORTED;
 }
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 if (length == -ERESTARTSYS ||
 length == -EAGAIN ||
@@ -849,7 +844,7 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
 #ifdef CONFIG_CIFS_STATS2
 mid->when_received = jiffies;
 #endif
-spin_lock(&GlobalMid_Lock);
+spin_lock(&mid->server->mid_lock);
 if (!malformed)
 mid->mid_state = MID_RESPONSE_RECEIVED;
 else
@@ -859,12 +854,12 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
 * function has finished processing it is a bug.
 */
 if (mid->mid_flags & MID_DELETED) {
-spin_unlock(&GlobalMid_Lock);
+spin_unlock(&mid->server->mid_lock);
 pr_warn_once("trying to dequeue a deleted mid\n");
 } else {
 list_del_init(&mid->qhead);
 mid->mid_flags |= MID_DELETED;
-spin_unlock(&GlobalMid_Lock);
+spin_unlock(&mid->server->mid_lock);
 }
 }
@@ -908,16 +903,16 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
 int length;
 /* take it off the list, if it's not already */
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&server->srv_lock);
 list_del_init(&server->tcp_ses_list);
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 cancel_delayed_work_sync(&server->echo);
 cancel_delayed_work_sync(&server->resolve);
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&server->srv_lock);
 server->tcpStatus = CifsExiting;
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 wake_up_all(&server->response_q);
 /* check if we have blocked requests that need to free */
@@ -948,7 +943,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
 struct list_head *tmp, *tmp2;
 INIT_LIST_HEAD(&dispose_list);
-spin_lock(&GlobalMid_Lock);
+spin_lock(&server->mid_lock);
 list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
 mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
 cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid);
@@ -957,7 +952,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
 list_move(&mid_entry->qhead, &dispose_list);
 mid_entry->mid_flags |= MID_DELETED;
 }
-spin_unlock(&GlobalMid_Lock);
+spin_unlock(&server->mid_lock);
 /* now walk dispose list and issue callbacks */
 list_for_each_safe(tmp, tmp2, &dispose_list) {
@@ -1410,6 +1405,7 @@ match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
 return true;
 }
+/* this function must be called with srv_lock held */
 static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
 {
 struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
@@ -1470,6 +1466,7 @@ cifs_find_tcp_session(struct smb3_fs_context *ctx)
 spin_lock(&cifs_tcp_ses_lock);
 list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+spin_lock(&server->srv_lock);
 #ifdef CONFIG_CIFS_DFS_UPCALL
 /*
 * DFS failover implementation in cifs_reconnect() requires unique tcp sessions for
@@ -1477,15 +1474,20 @@ cifs_find_tcp_session(struct smb3_fs_context *ctx)
 * shares or even links that may connect to same server but having completely
 * different failover targets.
 */
-if (server->is_dfs_conn)
+if (server->is_dfs_conn) {
+spin_unlock(&server->srv_lock);
 continue;
+}
 #endif
 /*
 * Skip ses channels since they're only handled in lower layers
 * (e.g. cifs_send_recv).
 */
-if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx))
+if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx)) {
+spin_unlock(&server->srv_lock);
 continue;
+}
+spin_unlock(&server->srv_lock);
 ++server->srv_count;
 spin_unlock(&cifs_tcp_ses_lock);
@@ -1533,9 +1535,9 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
 else
 cancel_delayed_work_sync(&server->reconnect);
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&server->srv_lock);
 server->tcpStatus = CifsExiting;
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 cifs_crypto_secmech_release(server);
@@ -1594,8 +1596,8 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
 if (primary_server) {
 spin_lock(&cifs_tcp_ses_lock);
 ++primary_server->srv_count;
-tcp_ses->primary_server = primary_server;
 spin_unlock(&cifs_tcp_ses_lock);
+tcp_ses->primary_server = primary_server;
 }
 init_waitqueue_head(&tcp_ses->response_q);
 init_waitqueue_head(&tcp_ses->request_q);
@@ -1611,6 +1613,8 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
 tcp_ses->lstrp = jiffies;
 tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression);
 spin_lock_init(&tcp_ses->req_lock);
+spin_lock_init(&tcp_ses->srv_lock);
+spin_lock_init(&tcp_ses->mid_lock);
 INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
 INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
 INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
@@ -1684,9 +1688,9 @@ smbd_connected:
 * to the struct since the kernel thread not created yet
 * no need to spinlock this update of tcpStatus
 */
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&tcp_ses->srv_lock);
 tcp_ses->tcpStatus = CifsNeedNegotiate;
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&tcp_ses->srv_lock);
 if ((ctx->max_credits < 20) || (ctx->max_credits > 60000))
 tcp_ses->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
@@ -1728,6 +1732,7 @@ out_err:
 return ERR_PTR(rc);
 }
+/* this function must be called with ses_lock held */
 static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
 {
 if (ctx->sectype != Unspecified &&
@@ -1863,10 +1868,17 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
 spin_lock(&cifs_tcp_ses_lock);
 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
-if (ses->ses_status == SES_EXITING)
+spin_lock(&ses->ses_lock);
+if (ses->ses_status == SES_EXITING) {
+spin_unlock(&ses->ses_lock);
 continue;
-if (!match_session(ses, ctx))
+}
+if (!match_session(ses, ctx)) {
+spin_unlock(&ses->ses_lock);
 continue;
+}
+spin_unlock(&ses->ses_lock);
 ++ses->ses_count;
 spin_unlock(&cifs_tcp_ses_lock);
 return ses;
@@ -1881,26 +1893,28 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
 unsigned int chan_count;
 struct TCP_Server_Info *server = ses->server;
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&ses->ses_lock);
 if (ses->ses_status == SES_EXITING) {
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&ses->ses_lock);
 return;
 }
+spin_unlock(&ses->ses_lock);
 cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
 cifs_dbg(FYI, "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? ses->tcon_ipc->treeName : "NONE");
+spin_lock(&cifs_tcp_ses_lock);
 if (--ses->ses_count > 0) {
 spin_unlock(&cifs_tcp_ses_lock);
 return;
 }
+spin_unlock(&cifs_tcp_ses_lock);
 /* ses_count can never go negative */
 WARN_ON(ses->ses_count < 0);
 if (ses->ses_status == SES_GOOD)
 ses->ses_status = SES_EXITING;
-spin_unlock(&cifs_tcp_ses_lock);
 cifs_free_ipc(ses);
@@ -2235,6 +2249,7 @@ get_ses_fail:
 return ERR_PTR(rc);
 }
+/* this function must be called with tc_lock held */
 static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
 {
 if (tcon->status == TID_EXITING)
@@ -2261,9 +2276,13 @@ cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
 spin_lock(&cifs_tcp_ses_lock);
 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
-if (!match_tcon(tcon, ctx))
+spin_lock(&tcon->tc_lock);
+if (!match_tcon(tcon, ctx)) {
+spin_unlock(&tcon->tc_lock);
 continue;
+}
 ++tcon->tc_count;
+spin_unlock(&tcon->tc_lock);
 spin_unlock(&cifs_tcp_ses_lock);
 return tcon;
 }
@@ -2663,6 +2682,9 @@ cifs_match_super(struct super_block *sb, void *data)
 ctx = mnt_data->ctx;
+spin_lock(&tcp_srv->srv_lock);
+spin_lock(&ses->ses_lock);
+spin_lock(&tcon->tc_lock);
 if (!match_server(tcp_srv, ctx) ||
 !match_session(ses, ctx) ||
 !match_tcon(tcon, ctx) ||
@@ -2673,6 +2695,10 @@ cifs_match_super(struct super_block *sb, void *data)
 rc = compare_mount_options(sb, mnt_data);
 out:
+spin_unlock(&tcon->tc_lock);
+spin_unlock(&ses->ses_lock);
+spin_unlock(&tcp_srv->srv_lock);
 spin_unlock(&cifs_tcp_ses_lock);
 cifs_put_tlink(tlink);
 return rc;
@@ -3178,15 +3204,15 @@ static int mount_get_conns(struct mount_ctx *mnt_ctx)
 * for just this mount.
 */
 reset_cifs_unix_caps(xid, tcon, cifs_sb, ctx);
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&tcon->ses->server->srv_lock);
 if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
 (le64_to_cpu(tcon->fsUnixInfo.Capability) &
 CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&tcon->ses->server->srv_lock);
 rc = -EACCES;
 goto out;
 }
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&tcon->ses->server->srv_lock);
 } else
 tcon->unix_ext = 0; /* server does not support them */
@@ -3269,9 +3295,9 @@ static int mount_get_dfs_conns(struct mount_ctx *mnt_ctx)
 rc = mount_get_conns(mnt_ctx);
 if (mnt_ctx->server) {
 cifs_dbg(FYI, "%s: marking tcp session as a dfs connection\n", __func__);
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&mnt_ctx->server->srv_lock);
 mnt_ctx->server->is_dfs_conn = true;
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&mnt_ctx->server->srv_lock);
 }
 return rc;
 }
@@ -3986,28 +4012,28 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
 return -ENOSYS;
 /* only send once per connect */
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&server->srv_lock);
 if (!server->ops->need_neg(server) ||
 server->tcpStatus != CifsNeedNegotiate) {
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 return 0;
 }
 server->tcpStatus = CifsInNegotiate;
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 rc = server->ops->negotiate(xid, ses, server);
 if (rc == 0) {
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&server->srv_lock);
 if (server->tcpStatus == CifsInNegotiate)
 server->tcpStatus = CifsGood;
 else
 rc = -EHOSTDOWN;
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 } else {
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&server->srv_lock);
 if (server->tcpStatus == CifsInNegotiate)
 server->tcpStatus = CifsNeedNegotiate;
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 }
 return rc;
@@ -4023,7 +4049,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
 struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
 bool is_binding = false;
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&ses->ses_lock);
 if (server->dstaddr.ss_family == AF_INET6)
 scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr);
 else
@@ -4032,7 +4058,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
 if (ses->ses_status != SES_GOOD &&
 ses->ses_status != SES_NEW &&
 ses->ses_status != SES_NEED_RECON) {
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&ses->ses_lock);
 return 0;
 }
@@ -4041,7 +4067,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
 if (CIFS_ALL_CHANS_GOOD(ses) ||
 cifs_chan_in_reconnect(ses, server)) {
 spin_unlock(&ses->chan_lock);
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&ses->ses_lock);
 return 0;
 }
 is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
@@ -4050,7 +4076,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
 if (!is_binding)
 ses->ses_status = SES_IN_SETUP;
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&ses->ses_lock);
 if (!is_binding) {
 ses->capabilities = server->capabilities;
@@ -4074,22 +4100,22 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
 if (rc) {
 cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc);
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&ses->ses_lock);
 if (ses->ses_status == SES_IN_SETUP)
 ses->ses_status = SES_NEED_RECON;
 spin_lock(&ses->chan_lock);
 cifs_chan_clear_in_reconnect(ses, server);
 spin_unlock(&ses->chan_lock);
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&ses->ses_lock);
 } else {
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&ses->ses_lock);
 if (ses->ses_status == SES_IN_SETUP)
 ses->ses_status = SES_GOOD;
 spin_lock(&ses->chan_lock);
 cifs_chan_clear_in_reconnect(ses, server);
 cifs_chan_clear_need_reconnect(ses, server);
 spin_unlock(&ses->chan_lock);
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&ses->ses_lock);
 }
 return rc;
@@ -4553,15 +4579,15 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
 struct dfs_info3_param ref = {0};
 /* only send once per connect */
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&tcon->tc_lock);
 if (tcon->ses->ses_status != SES_GOOD ||
 (tcon->status != TID_NEW &&
 tcon->status != TID_NEED_TCON)) {
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&tcon->ses->ses_lock);
 return 0;
 }
 tcon->status = TID_IN_TCON;
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&tcon->tc_lock);
 tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
 if (!tree) {
@@ -4600,15 +4626,15 @@ out:
 cifs_put_tcp_super(sb);
 if (rc) {
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&tcon->tc_lock);
 if (tcon->status == TID_IN_TCON)
 tcon->status = TID_NEED_TCON;
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&tcon->tc_lock);
 } else {
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&tcon->tc_lock);
 if (tcon->status == TID_IN_TCON)
 tcon->status = TID_GOOD;
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&tcon->tc_lock);
 tcon->need_reconnect = false;
 }
@@ -4621,28 +4647,28 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
 const struct smb_version_operations *ops = tcon->ses->server->ops;
 /* only send once per connect */
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&tcon->tc_lock);
 if (tcon->ses->ses_status != SES_GOOD ||
 (tcon->status != TID_NEW &&
 tcon->status != TID_NEED_TCON)) {
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&tcon->tc_lock);
 return 0;
 }
 tcon->status = TID_IN_TCON;
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&tcon->tc_lock);
 rc = ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, nlsc);
 if (rc) {
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&tcon->tc_lock);
 if (tcon->status == TID_IN_TCON)
 tcon->status = TID_NEED_TCON;
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&tcon->tc_lock);
 } else {
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&tcon->tc_lock);
 if (tcon->status == TID_IN_TCON)
 tcon->status = TID_GOOD;
-spin_unlock(&cifs_tcp_ses_lock);
 tcon->need_reconnect = false;
+spin_unlock(&tcon->tc_lock);
 }
 return rc;


@@ -1526,15 +1526,21 @@ static void refresh_mounts(struct cifs_ses **sessions)
 spin_lock(&cifs_tcp_ses_lock);
 list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
-if (!server->is_dfs_conn)
+spin_lock(&server->srv_lock);
+if (!server->is_dfs_conn) {
+spin_unlock(&server->srv_lock);
 continue;
+}
+spin_unlock(&server->srv_lock);
 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+spin_lock(&tcon->tc_lock);
 if (!tcon->ipc && !tcon->need_reconnect) {
 tcon->tc_count++;
 list_add_tail(&tcon->ulist, &tcons);
 }
+spin_unlock(&tcon->tc_lock);
 }
 }
 }


@@ -69,6 +69,7 @@ sesInfoAlloc(void)
 ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
 if (ret_buf) {
 atomic_inc(&sesInfoAllocCount);
+spin_lock_init(&ret_buf->ses_lock);
 ret_buf->ses_status = SES_NEW;
 ++ret_buf->ses_count;
 INIT_LIST_HEAD(&ret_buf->smb_ses_list);
@@ -126,6 +127,7 @@ tconInfoAlloc(void)
 atomic_inc(&tconInfoAllocCount);
 ret_buf->status = TID_NEW;
 ++ret_buf->tc_count;
+spin_lock_init(&ret_buf->tc_lock);
 INIT_LIST_HEAD(&ret_buf->openFileList);
 INIT_LIST_HEAD(&ret_buf->tcon_list);
 spin_lock_init(&ret_buf->open_file_lock);


@@ -92,17 +92,17 @@ cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
 struct smb_hdr *buf = (struct smb_hdr *)buffer;
 struct mid_q_entry *mid;
-spin_lock(&GlobalMid_Lock);
+spin_lock(&server->mid_lock);
 list_for_each_entry(mid, &server->pending_mid_q, qhead) {
 if (compare_mid(mid->mid, buf) &&
 mid->mid_state == MID_REQUEST_SUBMITTED &&
 le16_to_cpu(mid->command) == buf->Command) {
 kref_get(&mid->refcount);
-spin_unlock(&GlobalMid_Lock);
+spin_unlock(&server->mid_lock);
 return mid;
 }
 }
-spin_unlock(&GlobalMid_Lock);
+spin_unlock(&server->mid_lock);
 return NULL;
 }
@@ -166,7 +166,7 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
 __u16 last_mid, cur_mid;
 bool collision, reconnect = false;
-spin_lock(&GlobalMid_Lock);
+spin_lock(&server->mid_lock);
 /* mid is 16 bit only for CIFS/SMB */
 cur_mid = (__u16)((server->CurrentMid) & 0xffff);
@@ -225,7 +225,7 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
 }
 cur_mid++;
 }
-spin_unlock(&GlobalMid_Lock);
+spin_unlock(&server->mid_lock);
 if (reconnect) {
 cifs_signal_cifsd_for_reconnect(server, false);


@@ -126,13 +126,13 @@ smb2_add_credits(struct TCP_Server_Info *server,
 optype, scredits, add);
 }
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&server->srv_lock);
 if (server->tcpStatus == CifsNeedReconnect
 || server->tcpStatus == CifsExiting) {
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 return;
 }
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 switch (rc) {
 case -1:
@@ -218,12 +218,12 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
 spin_lock(&server->req_lock);
 } else {
 spin_unlock(&server->req_lock);
-spin_lock(&cifs_tcp_ses_lock);
+spin_lock(&server->srv_lock);
 if (server->tcpStatus == CifsExiting) {
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 return -ENOENT;
 }
-spin_unlock(&cifs_tcp_ses_lock);
+spin_unlock(&server->srv_lock);
 spin_lock(&server->req_lock);
 scredits = server->credits;
@@ -319,19 +319,19 @@ smb2_get_next_mid(struct TCP_Server_Info *server)
 {
 __u64 mid;
 /* for SMB2 we need the current value */
-spin_lock(&GlobalMid_Lock);
+spin_lock(&server->mid_lock);
 mid = server->CurrentMid++;
-spin_unlock(&GlobalMid_Lock);
+spin_unlock(&server->mid_lock);
 return mid;
 }
 static void
 smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
 {
-spin_lock(&GlobalMid_Lock);
+spin_lock(&server->mid_lock);
 if (server->CurrentMid >= val)
 server->CurrentMid -= val;
-spin_unlock(&GlobalMid_Lock);
+spin_unlock(&server->mid_lock);
 }
 static struct mid_q_entry *
@@ -346,7 +346,7 @@ __smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
 return NULL;
 }
-spin_lock(&GlobalMid_Lock);
+spin_lock(&server->mid_lock);
 list_for_each_entry(mid, &server->pending_mid_q, qhead) {
 if ((mid->mid == wire_mid) &&
 (mid->mid_state == MID_REQUEST_SUBMITTED) &&
@@ -356,11 +356,11 @@ __smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
 list_del_init(&mid->qhead);
 mid->mid_flags |= MID_DELETED;
 }
-spin_unlock(&GlobalMid_Lock);
+spin_unlock(&server->mid_lock);
 return mid;
 }
 }
-spin_unlock(&GlobalMid_Lock);
+spin_unlock(&server->mid_lock);
 return NULL;
 }
@@ -403,9 +403,9 @@ smb2_negotiate(const unsigned int xid,
 {
 int rc;
-spin_lock(&GlobalMid_Lock);
+spin_lock(&server->mid_lock);
 server->CurrentMid = 0;
-spin_unlock(&GlobalMid_Lock);
+spin_unlock(&server->mid_lock);
 rc = SMB2_negotiate(xid, ses, server);
 /* BB we probably don't need to retry with modern servers */
 if (rc == -EAGAIN)
@ -2585,7 +2585,9 @@ smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) { if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) {
spin_lock(&tcon->tc_lock);
tcon->need_reconnect = true; tcon->need_reconnect = true;
spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&cifs_tcp_ses_lock);
pr_warn_once("Server share %s deleted.\n", pr_warn_once("Server share %s deleted.\n",
tcon->treeName); tcon->treeName);
@ -4561,9 +4563,11 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
if (ses->Suid == ses_id) { if (ses->Suid == ses_id) {
spin_lock(&ses->ses_lock);
ses_enc_key = enc ? ses->smb3encryptionkey : ses_enc_key = enc ? ses->smb3encryptionkey :
ses->smb3decryptionkey; ses->smb3decryptionkey;
memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE); memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
spin_unlock(&ses->ses_lock);
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&cifs_tcp_ses_lock);
return 0; return 0;
} }
@ -5078,20 +5082,21 @@ static void smb2_decrypt_offload(struct work_struct *work)
mid->callback(mid); mid->callback(mid);
} else { } else {
spin_lock(&cifs_tcp_ses_lock); spin_lock(&dw->server->srv_lock);
spin_lock(&GlobalMid_Lock);
if (dw->server->tcpStatus == CifsNeedReconnect) { if (dw->server->tcpStatus == CifsNeedReconnect) {
spin_lock(&dw->server->mid_lock);
mid->mid_state = MID_RETRY_NEEDED; mid->mid_state = MID_RETRY_NEEDED;
spin_unlock(&GlobalMid_Lock); spin_unlock(&dw->server->mid_lock);
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&dw->server->srv_lock);
mid->callback(mid); mid->callback(mid);
} else { } else {
spin_lock(&dw->server->mid_lock);
mid->mid_state = MID_REQUEST_SUBMITTED; mid->mid_state = MID_REQUEST_SUBMITTED;
mid->mid_flags &= ~(MID_DELETED); mid->mid_flags &= ~(MID_DELETED);
list_add_tail(&mid->qhead, list_add_tail(&mid->qhead,
&dw->server->pending_mid_q); &dw->server->pending_mid_q);
spin_unlock(&GlobalMid_Lock); spin_unlock(&dw->server->mid_lock);
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&dw->server->srv_lock);
} }
} }
cifs_mid_q_entry_release(mid); cifs_mid_q_entry_release(mid);
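The offloaded-decrypt path above is the clearest example of the new nesting order: connection state is sampled under server->srv_lock, and server->mid_lock is taken inside it only for the MID state change or requeue. A condensed sketch of that branch, with a hypothetical helper name:

/* Sketch: retry or requeue a MID after offloaded decryption (srv_lock outer, mid_lock inner). */
static void sketch_requeue_after_decrypt(struct TCP_Server_Info *server,
                                         struct mid_q_entry *mid)
{
        spin_lock(&server->srv_lock);
        if (server->tcpStatus == CifsNeedReconnect) {
                spin_lock(&server->mid_lock);
                mid->mid_state = MID_RETRY_NEEDED;
                spin_unlock(&server->mid_lock);
                spin_unlock(&server->srv_lock);
                mid->callback(mid);             /* hand it back for retry */
        } else {
                spin_lock(&server->mid_lock);
                mid->mid_state = MID_REQUEST_SUBMITTED;
                mid->mid_flags &= ~MID_DELETED;
                list_add_tail(&mid->qhead, &server->pending_mid_q);
                spin_unlock(&server->mid_lock);
                spin_unlock(&server->srv_lock);
        }
}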

@ -162,7 +162,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL) if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
return 0; return 0;
spin_lock(&cifs_tcp_ses_lock); spin_lock(&tcon->tc_lock);
if (tcon->status == TID_EXITING) { if (tcon->status == TID_EXITING) {
/* /*
* only tree disconnect, open, and write, * only tree disconnect, open, and write,
@ -172,13 +172,13 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
if ((smb2_command != SMB2_WRITE) && if ((smb2_command != SMB2_WRITE) &&
(smb2_command != SMB2_CREATE) && (smb2_command != SMB2_CREATE) &&
(smb2_command != SMB2_TREE_DISCONNECT)) { (smb2_command != SMB2_TREE_DISCONNECT)) {
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&tcon->tc_lock);
cifs_dbg(FYI, "can not send cmd %d while umounting\n", cifs_dbg(FYI, "can not send cmd %d while umounting\n",
smb2_command); smb2_command);
return -ENODEV; return -ENODEV;
} }
} }
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&tcon->tc_lock);
if ((!tcon->ses) || (tcon->ses->ses_status == SES_EXITING) || if ((!tcon->ses) || (tcon->ses->ses_status == SES_EXITING) ||
(!tcon->ses->server) || !server) (!tcon->ses->server) || !server)
return -EIO; return -EIO;
@ -217,12 +217,12 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
} }
/* are we still trying to reconnect? */ /* are we still trying to reconnect? */
spin_lock(&cifs_tcp_ses_lock); spin_lock(&server->srv_lock);
if (server->tcpStatus != CifsNeedReconnect) { if (server->tcpStatus != CifsNeedReconnect) {
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
break; break;
} }
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
if (retries && --retries) if (retries && --retries)
continue; continue;
@ -256,13 +256,13 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
* and the server never sends an answer the socket will be closed * and the server never sends an answer the socket will be closed
* and tcpStatus set to reconnect. * and tcpStatus set to reconnect.
*/ */
spin_lock(&cifs_tcp_ses_lock); spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsNeedReconnect) { if (server->tcpStatus == CifsNeedReconnect) {
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
rc = -EHOSTDOWN; rc = -EHOSTDOWN;
goto out; goto out;
} }
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
/* /*
* need to prevent multiple threads trying to simultaneously * need to prevent multiple threads trying to simultaneously
@ -3911,15 +3911,15 @@ SMB2_echo(struct TCP_Server_Info *server)
cifs_dbg(FYI, "In echo request for conn_id %lld\n", server->conn_id); cifs_dbg(FYI, "In echo request for conn_id %lld\n", server->conn_id);
spin_lock(&cifs_tcp_ses_lock); spin_lock(&server->srv_lock);
if (server->ops->need_neg && if (server->ops->need_neg &&
server->ops->need_neg(server)) { server->ops->need_neg(server)) {
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
/* No need to send echo on newly established connections */ /* No need to send echo on newly established connections */
mod_delayed_work(cifsiod_wq, &server->reconnect, 0); mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
return rc; return rc;
} }
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
rc = smb2_plain_req_init(SMB2_ECHO, NULL, server, rc = smb2_plain_req_init(SMB2_ECHO, NULL, server,
(void **)&req, &total_len); (void **)&req, &total_len);
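Both the reconnect loop and the echo path above use the same short-critical-section shape: hold server->srv_lock only long enough to sample tcpStatus or need_neg, then drop it before doing any work. A sketch of that peek, with a hypothetical helper name:

/* Sketch: sample connection state under srv_lock; act on the answer afterwards. */
static bool sketch_server_needs_negotiate(struct TCP_Server_Info *server)
{
        bool need_neg;

        spin_lock(&server->srv_lock);
        need_neg = server->ops->need_neg && server->ops->need_neg(server);
        spin_unlock(&server->srv_lock);

        return need_neg;        /* caller decides: skip the echo, kick reconnect work, ... */
}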

@ -640,13 +640,13 @@ smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
if (!is_signed) if (!is_signed)
return 0; return 0;
spin_lock(&cifs_tcp_ses_lock); spin_lock(&server->srv_lock);
if (server->ops->need_neg && if (server->ops->need_neg &&
server->ops->need_neg(server)) { server->ops->need_neg(server)) {
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
return 0; return 0;
} }
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
if (!is_binding && !server->session_estab) { if (!is_binding && !server->session_estab) {
strncpy(shdr->Signature, "BSRSPYL", 8); strncpy(shdr->Signature, "BSRSPYL", 8);
return 0; return 0;
@ -762,28 +762,30 @@ static int
smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server, smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server,
struct smb2_hdr *shdr, struct mid_q_entry **mid) struct smb2_hdr *shdr, struct mid_q_entry **mid)
{ {
spin_lock(&cifs_tcp_ses_lock); spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) { if (server->tcpStatus == CifsExiting) {
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
return -ENOENT; return -ENOENT;
} }
if (server->tcpStatus == CifsNeedReconnect) { if (server->tcpStatus == CifsNeedReconnect) {
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
cifs_dbg(FYI, "tcp session dead - return to caller to retry\n"); cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
return -EAGAIN; return -EAGAIN;
} }
if (server->tcpStatus == CifsNeedNegotiate && if (server->tcpStatus == CifsNeedNegotiate &&
shdr->Command != SMB2_NEGOTIATE) { shdr->Command != SMB2_NEGOTIATE) {
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
return -EAGAIN; return -EAGAIN;
} }
spin_unlock(&server->srv_lock);
spin_lock(&ses->ses_lock);
if (ses->ses_status == SES_NEW) { if (ses->ses_status == SES_NEW) {
if ((shdr->Command != SMB2_SESSION_SETUP) && if ((shdr->Command != SMB2_SESSION_SETUP) &&
(shdr->Command != SMB2_NEGOTIATE)) { (shdr->Command != SMB2_NEGOTIATE)) {
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&ses->ses_lock);
return -EAGAIN; return -EAGAIN;
} }
/* else ok - we are setting up session */ /* else ok - we are setting up session */
@ -791,19 +793,19 @@ smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server,
if (ses->ses_status == SES_EXITING) { if (ses->ses_status == SES_EXITING) {
if (shdr->Command != SMB2_LOGOFF) { if (shdr->Command != SMB2_LOGOFF) {
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&ses->ses_lock);
return -EAGAIN; return -EAGAIN;
} }
/* else ok - we are shutting down the session */ /* else ok - we are shutting down the session */
} }
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&ses->ses_lock);
*mid = smb2_mid_entry_alloc(shdr, server); *mid = smb2_mid_entry_alloc(shdr, server);
if (*mid == NULL) if (*mid == NULL)
return -ENOMEM; return -ENOMEM;
spin_lock(&GlobalMid_Lock); spin_lock(&server->mid_lock);
list_add_tail(&(*mid)->qhead, &server->pending_mid_q); list_add_tail(&(*mid)->qhead, &server->pending_mid_q);
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
return 0; return 0;
} }
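Worth noting in the hunk above: smb2_get_mid_entry never stacks the new locks. It checks tcpStatus under server->srv_lock, drops it, checks ses_status under ses->ses_lock, drops that, and only then takes server->mid_lock to enqueue the MID. A compressed sketch of that sequence, error cases trimmed and the helper name hypothetical:

/* Sketch: take each granular lock in turn, never nested, when admitting a new MID. */
static int sketch_admit_mid(struct cifs_ses *ses, struct TCP_Server_Info *server,
                            struct mid_q_entry *mid)
{
        spin_lock(&server->srv_lock);
        if (server->tcpStatus == CifsExiting) {
                spin_unlock(&server->srv_lock);
                return -ENOENT;
        }
        if (server->tcpStatus == CifsNeedReconnect) {
                spin_unlock(&server->srv_lock);
                return -EAGAIN;                 /* caller retries after reconnect */
        }
        spin_unlock(&server->srv_lock);

        spin_lock(&ses->ses_lock);
        if (ses->ses_status == SES_EXITING) {
                spin_unlock(&ses->ses_lock);
                return -EAGAIN;                 /* session is being torn down */
        }
        spin_unlock(&ses->ses_lock);

        spin_lock(&server->mid_lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&server->mid_lock);
        return 0;
}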
@ -869,13 +871,13 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
(struct smb2_hdr *)rqst->rq_iov[0].iov_base; (struct smb2_hdr *)rqst->rq_iov[0].iov_base;
struct mid_q_entry *mid; struct mid_q_entry *mid;
spin_lock(&cifs_tcp_ses_lock); spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsNeedNegotiate && if (server->tcpStatus == CifsNeedNegotiate &&
shdr->Command != SMB2_NEGOTIATE) { shdr->Command != SMB2_NEGOTIATE) {
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
return ERR_PTR(-EAGAIN); return ERR_PTR(-EAGAIN);
} }
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
smb2_seq_num_into_buf(server, shdr); smb2_seq_num_into_buf(server, shdr);

@ -154,9 +154,11 @@ static void _cifs_mid_q_entry_release(struct kref *refcount)
void cifs_mid_q_entry_release(struct mid_q_entry *midEntry) void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{ {
spin_lock(&GlobalMid_Lock); struct TCP_Server_Info *server = midEntry->server;
spin_lock(&server->mid_lock);
kref_put(&midEntry->refcount, _cifs_mid_q_entry_release); kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
} }
void DeleteMidQEntry(struct mid_q_entry *midEntry) void DeleteMidQEntry(struct mid_q_entry *midEntry)
@ -167,12 +169,12 @@ void DeleteMidQEntry(struct mid_q_entry *midEntry)
void void
cifs_delete_mid(struct mid_q_entry *mid) cifs_delete_mid(struct mid_q_entry *mid)
{ {
spin_lock(&GlobalMid_Lock); spin_lock(&mid->server->mid_lock);
if (!(mid->mid_flags & MID_DELETED)) { if (!(mid->mid_flags & MID_DELETED)) {
list_del_init(&mid->qhead); list_del_init(&mid->qhead);
mid->mid_flags |= MID_DELETED; mid->mid_flags |= MID_DELETED;
} }
spin_unlock(&GlobalMid_Lock); spin_unlock(&mid->server->mid_lock);
DeleteMidQEntry(mid); DeleteMidQEntry(mid);
} }
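With no global MID lock left, the release path has to reach the owning server first and use its mid_lock for both the kref drop and the unlink; MID_DELETED keeps the unlink idempotent. A sketch that folds the two functions above into one hypothetical helper:

/* Sketch: drop a MID under the lock of the server it belongs to. */
static void sketch_release_mid(struct mid_q_entry *mid)
{
        struct TCP_Server_Info *server = mid->server;

        spin_lock(&server->mid_lock);
        if (!(mid->mid_flags & MID_DELETED)) {
                list_del_init(&mid->qhead);     /* unlink at most once */
                mid->mid_flags |= MID_DELETED;
        }
        kref_put(&mid->refcount, _cifs_mid_q_entry_release);
        spin_unlock(&server->mid_lock);
}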
@ -577,12 +579,12 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
} else { } else {
spin_unlock(&server->req_lock); spin_unlock(&server->req_lock);
spin_lock(&cifs_tcp_ses_lock); spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) { if (server->tcpStatus == CifsExiting) {
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
return -ENOENT; return -ENOENT;
} }
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
/* /*
* For normal commands, reserve the last MAX_COMPOUND * For normal commands, reserve the last MAX_COMPOUND
@ -725,11 +727,11 @@ cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf, static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
struct mid_q_entry **ppmidQ) struct mid_q_entry **ppmidQ)
{ {
spin_lock(&cifs_tcp_ses_lock); spin_lock(&ses->ses_lock);
if (ses->ses_status == SES_NEW) { if (ses->ses_status == SES_NEW) {
if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
(in_buf->Command != SMB_COM_NEGOTIATE)) { (in_buf->Command != SMB_COM_NEGOTIATE)) {
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&ses->ses_lock);
return -EAGAIN; return -EAGAIN;
} }
/* else ok - we are setting up session */ /* else ok - we are setting up session */
@ -738,19 +740,19 @@ static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
if (ses->ses_status == SES_EXITING) { if (ses->ses_status == SES_EXITING) {
/* check if SMB session is bad because we are setting it up */ /* check if SMB session is bad because we are setting it up */
if (in_buf->Command != SMB_COM_LOGOFF_ANDX) { if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&ses->ses_lock);
return -EAGAIN; return -EAGAIN;
} }
/* else ok - we are shutting down session */ /* else ok - we are shutting down session */
} }
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&ses->ses_lock);
*ppmidQ = AllocMidQEntry(in_buf, ses->server); *ppmidQ = AllocMidQEntry(in_buf, ses->server);
if (*ppmidQ == NULL) if (*ppmidQ == NULL)
return -ENOMEM; return -ENOMEM;
spin_lock(&GlobalMid_Lock); spin_lock(&ses->server->mid_lock);
list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q); list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
spin_unlock(&GlobalMid_Lock); spin_unlock(&ses->server->mid_lock);
return 0; return 0;
} }
@ -849,9 +851,9 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
mid->mid_state = MID_REQUEST_SUBMITTED; mid->mid_state = MID_REQUEST_SUBMITTED;
/* put it on the pending_mid_q */ /* put it on the pending_mid_q */
spin_lock(&GlobalMid_Lock); spin_lock(&server->mid_lock);
list_add_tail(&mid->qhead, &server->pending_mid_q); list_add_tail(&mid->qhead, &server->pending_mid_q);
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
/* /*
* Need to store the time in mid before calling I/O. For call_async, * Need to store the time in mid before calling I/O. For call_async,
@ -912,10 +914,10 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n", cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
__func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state); __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
spin_lock(&GlobalMid_Lock); spin_lock(&server->mid_lock);
switch (mid->mid_state) { switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED: case MID_RESPONSE_RECEIVED:
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
return rc; return rc;
case MID_RETRY_NEEDED: case MID_RETRY_NEEDED:
rc = -EAGAIN; rc = -EAGAIN;
@ -935,7 +937,7 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
__func__, mid->mid, mid->mid_state); __func__, mid->mid, mid->mid_state);
rc = -EIO; rc = -EIO;
} }
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
DeleteMidQEntry(mid); DeleteMidQEntry(mid);
return rc; return rc;
@ -1078,12 +1080,12 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
return -EIO; return -EIO;
} }
spin_lock(&cifs_tcp_ses_lock); spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) { if (server->tcpStatus == CifsExiting) {
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
return -ENOENT; return -ENOENT;
} }
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
/* /*
* Wait for all the requests to become available. * Wait for all the requests to become available.
@ -1186,17 +1188,17 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
/* /*
* Compounding is never used during session establish. * Compounding is never used during session establish.
*/ */
spin_lock(&cifs_tcp_ses_lock); spin_lock(&ses->ses_lock);
if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) { if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&ses->ses_lock);
cifs_server_lock(server); cifs_server_lock(server);
smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec); smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
cifs_server_unlock(server); cifs_server_unlock(server);
spin_lock(&cifs_tcp_ses_lock); spin_lock(&ses->ses_lock);
} }
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&ses->ses_lock);
for (i = 0; i < num_rqst; i++) { for (i = 0; i < num_rqst; i++) {
rc = wait_for_response(server, midQ[i]); rc = wait_for_response(server, midQ[i]);
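The preauth-hash update above drops ses->ses_lock before calling cifs_server_lock() because that is a sleeping lock (a mutex) and must not be taken inside a spinlock section; the spinlock is retaken only so the code can fall through the common unlock. A stripped-down sketch of that shape, reusing the helpers from the hunk and simplifying the SES_NEW/optype test:

/* Sketch: release the spinlock before taking the sleeping server lock. */
static void sketch_update_preauth(struct cifs_ses *ses,
                                  struct TCP_Server_Info *server,
                                  struct kvec *iov, int nvec)
{
        spin_lock(&ses->ses_lock);
        if (ses->ses_status == SES_NEW) {
                spin_unlock(&ses->ses_lock);    /* drop the spinlock first... */

                cifs_server_lock(server);       /* ...then take the mutex */
                smb311_update_preauth_hash(ses, server, iov, nvec);
                cifs_server_unlock(server);

                spin_lock(&ses->ses_lock);      /* retake for the common exit below */
        }
        spin_unlock(&ses->ses_lock);
}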
@ -1208,14 +1210,14 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n", cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
midQ[i]->mid, le16_to_cpu(midQ[i]->command)); midQ[i]->mid, le16_to_cpu(midQ[i]->command));
send_cancel(server, &rqst[i], midQ[i]); send_cancel(server, &rqst[i], midQ[i]);
spin_lock(&GlobalMid_Lock); spin_lock(&server->mid_lock);
midQ[i]->mid_flags |= MID_WAIT_CANCELLED; midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) { if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
midQ[i]->callback = cifs_cancelled_callback; midQ[i]->callback = cifs_cancelled_callback;
cancelled_mid[i] = true; cancelled_mid[i] = true;
credits[i].value = 0; credits[i].value = 0;
} }
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
} }
} }
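When a wait for a response is interrupted, the cancel bookkeeping also moves under the per-server mid_lock: mark the MID, swap in the cancelled callback, and let the demultiplex thread reap it later. A minimal per-MID sketch with a hypothetical helper name:

/* Sketch: mark one in-flight MID as cancelled so its late response is cleaned up. */
static bool sketch_cancel_mid(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
        bool cancelled = false;

        spin_lock(&server->mid_lock);
        mid->mid_flags |= MID_WAIT_CANCELLED;
        if (mid->mid_state == MID_REQUEST_SUBMITTED) {
                mid->callback = cifs_cancelled_callback;        /* releases credits later */
                cancelled = true;
        }
        spin_unlock(&server->mid_lock);

        return cancelled;       /* caller zeroes its credit count if true */
}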
@ -1259,19 +1261,19 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
/* /*
* Compounding is never used during session establish. * Compounding is never used during session establish.
*/ */
spin_lock(&cifs_tcp_ses_lock); spin_lock(&ses->ses_lock);
if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) { if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
struct kvec iov = { struct kvec iov = {
.iov_base = resp_iov[0].iov_base, .iov_base = resp_iov[0].iov_base,
.iov_len = resp_iov[0].iov_len .iov_len = resp_iov[0].iov_len
}; };
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&ses->ses_lock);
cifs_server_lock(server); cifs_server_lock(server);
smb311_update_preauth_hash(ses, server, &iov, 1); smb311_update_preauth_hash(ses, server, &iov, 1);
cifs_server_unlock(server); cifs_server_unlock(server);
spin_lock(&cifs_tcp_ses_lock); spin_lock(&ses->ses_lock);
} }
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&ses->ses_lock);
out: out:
/* /*
@ -1360,12 +1362,12 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
return -EIO; return -EIO;
} }
spin_lock(&cifs_tcp_ses_lock); spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) { if (server->tcpStatus == CifsExiting) {
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
return -ENOENT; return -ENOENT;
} }
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
/* Ensure that we do not send more than 50 overlapping requests /* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or to the same server. We may make this configurable later or
@ -1419,15 +1421,15 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
rc = wait_for_response(server, midQ); rc = wait_for_response(server, midQ);
if (rc != 0) { if (rc != 0) {
send_cancel(server, &rqst, midQ); send_cancel(server, &rqst, midQ);
spin_lock(&GlobalMid_Lock); spin_lock(&server->mid_lock);
if (midQ->mid_state == MID_REQUEST_SUBMITTED) { if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
/* no longer considered to be "in-flight" */ /* no longer considered to be "in-flight" */
midQ->callback = DeleteMidQEntry; midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
add_credits(server, &credits, 0); add_credits(server, &credits, 0);
return rc; return rc;
} }
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
} }
rc = cifs_sync_mid_result(midQ, server); rc = cifs_sync_mid_result(midQ, server);
@ -1505,12 +1507,12 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
return -EIO; return -EIO;
} }
spin_lock(&cifs_tcp_ses_lock); spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) { if (server->tcpStatus == CifsExiting) {
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
return -ENOENT; return -ENOENT;
} }
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
/* Ensure that we do not send more than 50 overlapping requests /* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or to the same server. We may make this configurable later or
@ -1568,12 +1570,12 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
(server->tcpStatus != CifsNew))); (server->tcpStatus != CifsNew)));
/* Were we interrupted by a signal ? */ /* Were we interrupted by a signal ? */
spin_lock(&cifs_tcp_ses_lock); spin_lock(&server->srv_lock);
if ((rc == -ERESTARTSYS) && if ((rc == -ERESTARTSYS) &&
(midQ->mid_state == MID_REQUEST_SUBMITTED) && (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
((server->tcpStatus == CifsGood) || ((server->tcpStatus == CifsGood) ||
(server->tcpStatus == CifsNew))) { (server->tcpStatus == CifsNew))) {
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
if (in_buf->Command == SMB_COM_TRANSACTION2) { if (in_buf->Command == SMB_COM_TRANSACTION2) {
/* POSIX lock. We send a NT_CANCEL SMB to cause the /* POSIX lock. We send a NT_CANCEL SMB to cause the
@ -1600,21 +1602,21 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
rc = wait_for_response(server, midQ); rc = wait_for_response(server, midQ);
if (rc) { if (rc) {
send_cancel(server, &rqst, midQ); send_cancel(server, &rqst, midQ);
spin_lock(&GlobalMid_Lock); spin_lock(&server->mid_lock);
if (midQ->mid_state == MID_REQUEST_SUBMITTED) { if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
/* no longer considered to be "in-flight" */ /* no longer considered to be "in-flight" */
midQ->callback = DeleteMidQEntry; midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
return rc; return rc;
} }
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
} }
/* We got the response - restart system call. */ /* We got the response - restart system call. */
rstart = 1; rstart = 1;
spin_lock(&cifs_tcp_ses_lock); spin_lock(&server->srv_lock);
} }
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&server->srv_lock);
rc = cifs_sync_mid_result(midQ, server); rc = cifs_sync_mid_result(midQ, server);
if (rc != 0) if (rc != 0)