commit bf2785a818
Merge branch 'for-next' of git://git.samba.org/sfrench/cifs-2.6

Pull cifs fixes from Steve French.

* 'for-next' of git://git.samba.org/sfrench/cifs-2.6:
  CIFS: Move get_next_mid to ops struct
  CIFS: Make accessing is_valid_oplock/dump_detail ops struct field safe
  CIFS: Improve indentation in cifs_unlock_range
  CIFS: Fix possible wrong memory allocation
fs/cifs/cifsglob.h

@@ -174,6 +174,7 @@ struct smb_version_operations {
 	void (*add_credits)(struct TCP_Server_Info *, const unsigned int);
 	void (*set_credits)(struct TCP_Server_Info *, const int);
 	int * (*get_credits_field)(struct TCP_Server_Info *);
+	__u64 (*get_next_mid)(struct TCP_Server_Info *);
 	/* data offset from read response message */
 	unsigned int (*read_data_offset)(char *);
 	/* data length from read response message */
@@ -399,6 +400,12 @@ set_credits(struct TCP_Server_Info *server, const int val)
 	server->ops->set_credits(server, val);
 }

+static inline __u64
+get_next_mid(struct TCP_Server_Info *server)
+{
+	return server->ops->get_next_mid(server);
+}
+
 /*
  * Macros to allow the TCP_Server_Info->net field and related code to drop out
  * when CONFIG_NET_NS isn't set.
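For readers less familiar with the ops-struct pattern being introduced here, the sketch below is a minimal userspace analogue (all type and function names are invented for illustration, not taken from the kernel): each SMB dialect supplies a table of function pointers, and callers dispatch through a thin wrapper, exactly as the new get_next_mid() inline above does.

	#include <stdio.h>

	struct server;

	struct server_ops {
		/* Dialect-specific hook; each SMB version supplies its own. */
		unsigned long long (*get_next_mid)(struct server *);
	};

	struct server {
		const struct server_ops *ops;
		unsigned long long current_mid;
	};

	/* Trivial "SMB1-like" implementation: a plain counter. */
	static unsigned long long demo_get_next_mid(struct server *srv)
	{
		return ++srv->current_mid;
	}

	static const struct server_ops demo_ops = {
		.get_next_mid = demo_get_next_mid,
	};

	/* Thin wrapper, mirroring the get_next_mid() inline in cifsglob.h. */
	static unsigned long long get_next_mid(struct server *srv)
	{
		return srv->ops->get_next_mid(srv);
	}

	int main(void)
	{
		struct server srv = { .ops = &demo_ops, .current_mid = 0 };

		printf("mid=%llu\n", get_next_mid(&srv)); /* mid=1 */
		printf("mid=%llu\n", get_next_mid(&srv)); /* mid=2 */
		return 0;
	}

The payoff of the indirection is that per-dialect behavior (here, mid allocation) moves out of common code paths and into one table per protocol version.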
fs/cifs/cifsproto.h

@@ -114,7 +114,6 @@ extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
 				void **request_buf);
 extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses,
 			  const struct nls_table *nls_cp);
-extern __u64 GetNextMid(struct TCP_Server_Info *server);
 extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
 extern u64 cifs_UnixTimeToNT(struct timespec);
 extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
fs/cifs/cifssmb.c

@@ -268,7 +268,7 @@ small_smb_init_no_tc(const int smb_command, const int wct,
 		return rc;

 	buffer = (struct smb_hdr *)*request_buf;
-	buffer->Mid = GetNextMid(ses->server);
+	buffer->Mid = get_next_mid(ses->server);
 	if (ses->capabilities & CAP_UNICODE)
 		buffer->Flags2 |= SMBFLG2_UNICODE;
 	if (ses->capabilities & CAP_STATUS32)
@@ -402,7 +402,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)

 	cFYI(1, "secFlags 0x%x", secFlags);

-	pSMB->hdr.Mid = GetNextMid(server);
+	pSMB->hdr.Mid = get_next_mid(server);
 	pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS);

 	if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
@@ -782,7 +782,7 @@ CIFSSMBLogoff(const int xid, struct cifs_ses *ses)
 		return rc;
 	}

-	pSMB->hdr.Mid = GetNextMid(ses->server);
+	pSMB->hdr.Mid = get_next_mid(ses->server);

 	if (ses->server->sec_mode &
 		   (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
@@ -4762,7 +4762,7 @@ getDFSRetry:

 	/* server pointer checked in called function,
 	but should never be null here anyway */
-	pSMB->hdr.Mid = GetNextMid(ses->server);
+	pSMB->hdr.Mid = get_next_mid(ses->server);
 	pSMB->hdr.Tid = ses->ipc_tid;
 	pSMB->hdr.Uid = ses->Suid;
 	if (ses->capabilities & CAP_STATUS32)
fs/cifs/connect.c

@@ -1058,13 +1058,15 @@ cifs_demultiplex_thread(void *p)
 		if (mid_entry != NULL) {
 			if (!mid_entry->multiRsp || mid_entry->multiEnd)
 				mid_entry->callback(mid_entry);
-		} else if (!server->ops->is_oplock_break(buf, server)) {
+		} else if (!server->ops->is_oplock_break ||
+			   !server->ops->is_oplock_break(buf, server)) {
 			cERROR(1, "No task to wake, unknown frame received! "
 				   "NumMids %d", atomic_read(&midCount));
 			cifs_dump_mem("Received Data is: ", buf,
 				      HEADER_SIZE(server));
 #ifdef CONFIG_CIFS_DEBUG2
-			server->ops->dump_detail(buf);
+			if (server->ops->dump_detail)
+				server->ops->dump_detail(buf);
 			cifs_dump_mids(server);
 #endif /* CIFS_DEBUG2 */

@@ -3938,7 +3940,7 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses,
 	header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX,
 			NULL /*no tid */ , 4 /*wct */ );

-	smb_buffer->Mid = GetNextMid(ses->server);
+	smb_buffer->Mid = get_next_mid(ses->server);
 	smb_buffer->Uid = ses->Suid;
 	pSMB = (TCONX_REQ *) smb_buffer;
 	pSMBr = (TCONX_RSP *) smb_buffer_response;
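The safety fix above treats is_oplock_break and dump_detail as optional hooks that a dialect may leave NULL, so every call site must test the pointer before calling through it. A minimal sketch of that guard, with invented names standing in for the kernel types:

	#include <stdbool.h>
	#include <stdio.h>

	struct server_ops {
		/* Optional hooks: either may be NULL for a given dialect. */
		bool (*is_oplock_break)(const char *buf);
		void (*dump_detail)(const char *buf);
	};

	/* Mirrors the demultiplex-thread logic: a NULL hook is treated as
	 * "not an oplock break", and detail dumping is simply skipped. */
	static void handle_unknown_frame(const struct server_ops *ops,
					 const char *buf)
	{
		if (ops->is_oplock_break && ops->is_oplock_break(buf))
			return; /* handled as an oplock break */

		fprintf(stderr, "No task to wake, unknown frame received!\n");
		if (ops->dump_detail)
			ops->dump_detail(buf);
	}

	int main(void)
	{
		/* A dialect implementing neither hook must not crash. */
		struct server_ops bare = { 0 };

		handle_unknown_frame(&bare, "frame");
		return 0;
	}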
fs/cifs/file.c
@@ -876,7 +876,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
 	struct cifsLockInfo *li, *tmp;
 	struct cifs_tcon *tcon;
 	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
-	unsigned int num, max_num;
+	unsigned int num, max_num, max_buf;
 	LOCKING_ANDX_RANGE *buf, *cur;
 	int types[] = {LOCKING_ANDX_LARGE_FILES,
 		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
@@ -892,8 +892,19 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
 		return rc;
 	}

-	max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
-		  sizeof(LOCKING_ANDX_RANGE);
+	/*
+	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
+	 * and check it for zero before using.
+	 */
+	max_buf = tcon->ses->server->maxBuf;
+	if (!max_buf) {
+		mutex_unlock(&cinode->lock_mutex);
+		FreeXid(xid);
+		return -EINVAL;
+	}
+
+	max_num = (max_buf - sizeof(struct smb_hdr)) /
+		  sizeof(LOCKING_ANDX_RANGE);
 	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
 	if (!buf) {
 		mutex_unlock(&cinode->lock_mutex);
@@ -1218,7 +1229,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
 	int types[] = {LOCKING_ANDX_LARGE_FILES,
 		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
 	unsigned int i;
-	unsigned int max_num, num;
+	unsigned int max_num, num, max_buf;
 	LOCKING_ANDX_RANGE *buf, *cur;
 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
 	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
@@ -1228,8 +1239,16 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)

 	INIT_LIST_HEAD(&tmp_llist);

-	max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
-		  sizeof(LOCKING_ANDX_RANGE);
+	/*
+	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
+	 * and check it for zero before using.
+	 */
+	max_buf = tcon->ses->server->maxBuf;
+	if (!max_buf)
+		return -EINVAL;
+
+	max_num = (max_buf - sizeof(struct smb_hdr)) /
+		  sizeof(LOCKING_ANDX_RANGE);
 	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
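Why the zero check in these two hunks matters ("Fix possible wrong memory allocation"): maxBuf is an unsigned field that a concurrent cifs_reconnect can momentarily leave at 0. Without the check, max_buf - sizeof(struct smb_hdr) underflows to an enormous unsigned value and max_num, and therefore the kzalloc size, is computed from garbage. Below is a standalone sketch of the fixed pattern under assumed toy sizes (a 32-byte header, 8-byte range entries); the names are invented for the demo:

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical stand-ins for smb_hdr and LOCKING_ANDX_RANGE. */
	struct hdr   { char pad[32]; };
	struct range { unsigned int off, len; };

	/* Snapshot the racy field once, reject zero, and only then
	 * derive sizes from the local copy. */
	static struct range *alloc_ranges(volatile unsigned int *max_buf_field,
					  unsigned int *max_num_out)
	{
		unsigned int max_buf = *max_buf_field;	/* single read */

		if (!max_buf)			/* reconnect in progress */
			return NULL;

		/* Without the check above, max_buf == 0 would underflow
		 * here: (0 - 32) as unsigned is ~4 billion, so max_num
		 * explodes and the allocation below is nonsense. */
		*max_num_out = (max_buf - sizeof(struct hdr)) /
			       sizeof(struct range);
		return calloc(*max_num_out, sizeof(struct range));
	}

	int main(void)
	{
		unsigned int max_buf = 4096, n = 0;
		struct range *buf = alloc_ranges(&max_buf, &n);

		printf("max_num=%u buf=%p\n", n, (void *)buf); /* max_num=508 */
		free(buf);
		return 0;
	}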
@@ -1247,46 +1266,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
 				continue;
 			if (types[i] != li->type)
 				continue;
-			if (!cinode->can_cache_brlcks) {
-				cur->Pid = cpu_to_le16(li->pid);
-				cur->LengthLow = cpu_to_le32((u32)li->length);
-				cur->LengthHigh =
-					cpu_to_le32((u32)(li->length>>32));
-				cur->OffsetLow = cpu_to_le32((u32)li->offset);
-				cur->OffsetHigh =
-					cpu_to_le32((u32)(li->offset>>32));
-				/*
-				 * We need to save a lock here to let us add
-				 * it again to the file's list if the unlock
-				 * range request fails on the server.
-				 */
-				list_move(&li->llist, &tmp_llist);
-				if (++num == max_num) {
-					stored_rc = cifs_lockv(xid, tcon,
-							       cfile->netfid,
-							       li->type, num,
-							       0, buf);
-					if (stored_rc) {
-						/*
-						 * We failed on the unlock range
-						 * request - add all locks from
-						 * the tmp list to the head of
-						 * the file's list.
-						 */
-						cifs_move_llist(&tmp_llist,
-								&cfile->llist);
-						rc = stored_rc;
-					} else
-						/*
-						 * The unlock range request
-						 * succeed - free the tmp list.
-						 */
-						cifs_free_llist(&tmp_llist);
-					cur = buf;
-					num = 0;
-				} else
-					cur++;
-			} else {
+			if (cinode->can_cache_brlcks) {
 				/*
 				 * We can cache brlock requests - simply remove
 				 * a lock from the file's list.
@@ -1294,7 +1274,41 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
 				list_del(&li->llist);
 				cifs_del_lock_waiters(li);
 				kfree(li);
+				continue;
 			}
+			cur->Pid = cpu_to_le16(li->pid);
+			cur->LengthLow = cpu_to_le32((u32)li->length);
+			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
+			cur->OffsetLow = cpu_to_le32((u32)li->offset);
+			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
+			/*
+			 * We need to save a lock here to let us add it again to
+			 * the file's list if the unlock range request fails on
+			 * the server.
+			 */
+			list_move(&li->llist, &tmp_llist);
+			if (++num == max_num) {
+				stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
+						       li->type, num, 0, buf);
+				if (stored_rc) {
+					/*
+					 * We failed on the unlock range
+					 * request - add all locks from the tmp
+					 * list to the head of the file's list.
+					 */
+					cifs_move_llist(&tmp_llist,
+							&cfile->llist);
+					rc = stored_rc;
+				} else
+					/*
+					 * The unlock range request succeed -
+					 * free the tmp list.
+					 */
+					cifs_free_llist(&tmp_llist);
+				cur = buf;
+				num = 0;
+			} else
+				cur++;
 		}
 		if (num) {
 			stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
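The two cifs_unlock_range hunks above are a pure re-indentation: the condition is inverted so the short, cacheable case is handled first and continues, letting the long send-to-server path lose one nesting level with no behavior change. A toy illustration of the refactor, with hypothetical helpers:

	#include <stdio.h>

	static int can_cache(int v) { return v % 2 == 0; }

	static void demo(const int *item, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			/* Special case first, then continue: the long path
			 * below stays at one level of indentation. */
			if (can_cache(item[i])) {
				printf("cached %d\n", item[i]);
				continue;
			}
			/* ...the long "send to server" path goes here... */
			printf("sent %d\n", item[i]);
		}
	}

	int main(void)
	{
		int items[] = { 1, 2, 3, 4 };

		demo(items, 4);
		return 0;
	}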
fs/cifs/misc.c

@@ -212,93 +212,6 @@ cifs_small_buf_release(void *buf_to_free)
 	return;
 }

-/*
- * Find a free multiplex id (SMB mid). Otherwise there could be
- * mid collisions which might cause problems, demultiplexing the
- * wrong response to this request. Multiplex ids could collide if
- * one of a series requests takes much longer than the others, or
- * if a very large number of long lived requests (byte range
- * locks or FindNotify requests) are pending. No more than
- * 64K-1 requests can be outstanding at one time. If no
- * mids are available, return zero. A future optimization
- * could make the combination of mids and uid the key we use
- * to demultiplex on (rather than mid alone).
- * In addition to the above check, the cifs demultiplex
- * code already used the command code as a secondary
- * check of the frame and if signing is negotiated the
- * response would be discarded if the mid were the same
- * but the signature was wrong. Since the mid is not put in the
- * pending queue until later (when it is about to be dispatched)
- * we do have to limit the number of outstanding requests
- * to somewhat less than 64K-1 although it is hard to imagine
- * so many threads being in the vfs at one time.
- */
-__u64 GetNextMid(struct TCP_Server_Info *server)
-{
-	__u64 mid = 0;
-	__u16 last_mid, cur_mid;
-	bool collision;
-
-	spin_lock(&GlobalMid_Lock);
-
-	/* mid is 16 bit only for CIFS/SMB */
-	cur_mid = (__u16)((server->CurrentMid) & 0xffff);
-	/* we do not want to loop forever */
-	last_mid = cur_mid;
-	cur_mid++;
-
-	/*
-	 * This nested loop looks more expensive than it is.
-	 * In practice the list of pending requests is short,
-	 * fewer than 50, and the mids are likely to be unique
-	 * on the first pass through the loop unless some request
-	 * takes longer than the 64 thousand requests before it
-	 * (and it would also have to have been a request that
-	 * did not time out).
-	 */
-	while (cur_mid != last_mid) {
-		struct mid_q_entry *mid_entry;
-		unsigned int num_mids;
-
-		collision = false;
-		if (cur_mid == 0)
-			cur_mid++;
-
-		num_mids = 0;
-		list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
-			++num_mids;
-			if (mid_entry->mid == cur_mid &&
-			    mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
-				/* This mid is in use, try a different one */
-				collision = true;
-				break;
-			}
-		}
-
-		/*
-		 * if we have more than 32k mids in the list, then something
-		 * is very wrong. Possibly a local user is trying to DoS the
-		 * box by issuing long-running calls and SIGKILL'ing them. If
-		 * we get to 2^16 mids then we're in big trouble as this
-		 * function could loop forever.
-		 *
-		 * Go ahead and assign out the mid in this situation, but force
-		 * an eventual reconnect to clean out the pending_mid_q.
-		 */
-		if (num_mids > 32768)
-			server->tcpStatus = CifsNeedReconnect;
-
-		if (!collision) {
-			mid = (__u64)cur_mid;
-			server->CurrentMid = mid;
-			break;
-		}
-		cur_mid++;
-	}
-	spin_unlock(&GlobalMid_Lock);
-	return mid;
-}
-
 /* NB: MID can not be set if treeCon not passed in, in that
    case it is responsbility of caller to set the mid */
 void
@@ -334,7 +247,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,

 		/* Uid is not converted */
 		buffer->Uid = treeCon->ses->Suid;
-		buffer->Mid = GetNextMid(treeCon->ses->server);
+		buffer->Mid = get_next_mid(treeCon->ses->server);
 	}
 	if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
 		buffer->Flags2 |= SMBFLG2_DFS;
fs/cifs/smb1ops.c

@@ -125,6 +125,94 @@ cifs_get_credits_field(struct TCP_Server_Info *server)
 	return &server->credits;
 }

+/*
+ * Find a free multiplex id (SMB mid). Otherwise there could be
+ * mid collisions which might cause problems, demultiplexing the
+ * wrong response to this request. Multiplex ids could collide if
+ * one of a series requests takes much longer than the others, or
+ * if a very large number of long lived requests (byte range
+ * locks or FindNotify requests) are pending. No more than
+ * 64K-1 requests can be outstanding at one time. If no
+ * mids are available, return zero. A future optimization
+ * could make the combination of mids and uid the key we use
+ * to demultiplex on (rather than mid alone).
+ * In addition to the above check, the cifs demultiplex
+ * code already used the command code as a secondary
+ * check of the frame and if signing is negotiated the
+ * response would be discarded if the mid were the same
+ * but the signature was wrong. Since the mid is not put in the
+ * pending queue until later (when it is about to be dispatched)
+ * we do have to limit the number of outstanding requests
+ * to somewhat less than 64K-1 although it is hard to imagine
+ * so many threads being in the vfs at one time.
+ */
+static __u64
+cifs_get_next_mid(struct TCP_Server_Info *server)
+{
+	__u64 mid = 0;
+	__u16 last_mid, cur_mid;
+	bool collision;
+
+	spin_lock(&GlobalMid_Lock);
+
+	/* mid is 16 bit only for CIFS/SMB */
+	cur_mid = (__u16)((server->CurrentMid) & 0xffff);
+	/* we do not want to loop forever */
+	last_mid = cur_mid;
+	cur_mid++;
+
+	/*
+	 * This nested loop looks more expensive than it is.
+	 * In practice the list of pending requests is short,
+	 * fewer than 50, and the mids are likely to be unique
+	 * on the first pass through the loop unless some request
+	 * takes longer than the 64 thousand requests before it
+	 * (and it would also have to have been a request that
+	 * did not time out).
+	 */
+	while (cur_mid != last_mid) {
+		struct mid_q_entry *mid_entry;
+		unsigned int num_mids;
+
+		collision = false;
+		if (cur_mid == 0)
+			cur_mid++;
+
+		num_mids = 0;
+		list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
+			++num_mids;
+			if (mid_entry->mid == cur_mid &&
+			    mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
+				/* This mid is in use, try a different one */
+				collision = true;
+				break;
+			}
+		}
+
+		/*
+		 * if we have more than 32k mids in the list, then something
+		 * is very wrong. Possibly a local user is trying to DoS the
+		 * box by issuing long-running calls and SIGKILL'ing them. If
+		 * we get to 2^16 mids then we're in big trouble as this
+		 * function could loop forever.
+		 *
+		 * Go ahead and assign out the mid in this situation, but force
+		 * an eventual reconnect to clean out the pending_mid_q.
+		 */
+		if (num_mids > 32768)
+			server->tcpStatus = CifsNeedReconnect;
+
+		if (!collision) {
+			mid = (__u64)cur_mid;
+			server->CurrentMid = mid;
+			break;
+		}
+		cur_mid++;
+	}
+	spin_unlock(&GlobalMid_Lock);
+	return mid;
+}
+
 struct smb_version_operations smb1_operations = {
 	.send_cancel = send_nt_cancel,
 	.compare_fids = cifs_compare_fids,
@@ -133,6 +221,7 @@ struct smb_version_operations smb1_operations = {
 	.add_credits = cifs_add_credits,
 	.set_credits = cifs_set_credits,
 	.get_credits_field = cifs_get_credits_field,
+	.get_next_mid = cifs_get_next_mid,
 	.read_data_offset = cifs_read_data_offset,
 	.read_data_length = cifs_read_data_length,
 	.map_error = map_smb_to_linux_error,
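The allocator that the comment above describes is easier to see at a small scale. Below is a sketch of the same wrap-around search, shrunk to an invented 8-slot id space with demo names; the real code additionally holds GlobalMid_Lock and forces a reconnect when the pending queue grows implausibly long:

	#include <stdio.h>

	#define NMIDS 8 /* tiny id space so wraparound is easy to see */

	static unsigned int pending[NMIDS]; /* 1 = a request owns this mid */
	static unsigned int current_mid;

	static unsigned int get_next_mid_demo(void)
	{
		unsigned int last_mid, cur_mid, mid = 0;

		cur_mid = current_mid % NMIDS;
		last_mid = cur_mid;	/* remember start: loop at most once */
		cur_mid = (cur_mid + 1) % NMIDS;

		while (cur_mid != last_mid) {
			if (cur_mid == 0) {	/* 0 is reserved, skip it */
				cur_mid = 1;
				continue;
			}
			if (!pending[cur_mid]) {	/* no collision */
				mid = cur_mid;
				current_mid = mid;
				pending[mid] = 1;
				break;
			}
			cur_mid = (cur_mid + 1) % NMIDS;
		}
		return mid;	/* 0 means "no mid available" */
	}

	int main(void)
	{
		pending[1] = pending[2] = 1; /* simulate in-flight requests */
		printf("next mid = %u\n", get_next_mid_demo()); /* -> 3 */
		printf("next mid = %u\n", get_next_mid_demo()); /* -> 4 */
		return 0;
	}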
fs/cifs/transport.c

@@ -779,7 +779,7 @@ send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,

 	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
 	pSMB->Timeout = 0;
-	pSMB->hdr.Mid = GetNextMid(ses->server);
+	pSMB->hdr.Mid = get_next_mid(ses->server);

 	return SendReceive(xid, ses, in_buf, out_buf,
 			   &bytes_returned, 0);