cifs: Fix use after free of a mid_q_entry
With protocol version 2.0 mounts we have seen crashes with corrupt mid entries. Either the server->pending_mid_q list becomes corrupt with a cyclic reference in one element or a mid object fetched by the demultiplexer thread becomes overwritten during use. Code review identified a race between the demultiplexer thread and the request issuing thread. The demultiplexer thread seems to be written with the assumption that it is the sole user of the mid object until it calls the mid callback which either wakes the issuer task or deletes the mid. This assumption is not true because the issuer task can be woken up earlier by a signal. If the demultiplexer thread has proceeded as far as setting the mid_state to MID_RESPONSE_RECEIVED then the issuer thread will happily end up calling cifs_delete_mid while the demultiplexer thread still is using the mid object. Inserting a delay in the cifs demultiplexer thread widens the race window and makes reproduction of the race very easy: if (server->large_buf) buf = server->bigbuf; + usleep_range(500, 4000); server->lstrp = jiffies; To resolve this I think the proper solution involves putting a reference count on the mid object. This patch makes sure that the demultiplexer thread holds a reference until it has finished processing the transaction. Cc: stable@vger.kernel.org Signed-off-by: Lars Persson <larper@axis.com> Acked-by: Paulo Alcantara <palcantara@suse.de> Reviewed-by: Ronnie Sahlberg <lsahlber@redhat.com> Reviewed-by: Pavel Shilovsky <pshilov@microsoft.com> Signed-off-by: Steve French <stfrench@microsoft.com>
parent 06c8563989
commit 696e420bb2
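For readers unfamiliar with the pattern, the fix adopts the standard kernel kref lifetime model: the mid carries one reference from allocation on behalf of the issuer, the demultiplexer thread takes its own reference when it looks the mid up, and the object only goes back to the mempool when the last reference is dropped, whichever thread that happens to be. Below is a minimal user-space sketch of that idea (illustration only, not kernel code; the names mid_obj, mid_alloc, mid_get and mid_put are made up here, and calloc/free plus a C11 atomic stand in for the mempool and struct kref):

/* Illustration only: user-space analogue of the refcounting scheme in this patch. */
#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

struct mid_obj {                        /* stand-in for struct mid_q_entry */
        atomic_int refcount;
        unsigned long long mid;
};

static struct mid_obj *mid_alloc(unsigned long long id)
{
        struct mid_obj *m = calloc(1, sizeof(*m));

        atomic_init(&m->refcount, 1);   /* issuer's reference, like kref_init() */
        m->mid = id;
        return m;
}

static void mid_get(struct mid_obj *m)
{
        atomic_fetch_add(&m->refcount, 1);      /* like kref_get() in the *_find_mid() lookups */
}

static void mid_put(struct mid_obj *m)
{
        /* like kref_put(): free only when the last reference is dropped */
        if (atomic_fetch_sub(&m->refcount, 1) == 1)
                free(m);
}

int main(void)
{
        struct mid_obj *m = mid_alloc(42);

        mid_get(m);     /* demultiplexer finds the mid and pins it */
        mid_put(m);     /* issuer (perhaps woken early by a signal) drops its reference */
        printf("mid %llu is still valid for the demultiplexer\n", m->mid);
        mid_put(m);     /* demultiplexer finishes the transaction; memory is freed */
        return 0;
}

In the patch itself the drop side, cifs_mid_q_entry_release(), additionally takes GlobalMid_Lock around kref_put(), since the lookups that take the extra reference (cifs_find_mid() and smb2_find_mid()) run under the same lock.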
@@ -1416,6 +1416,7 @@ typedef int (mid_handle_t)(struct TCP_Server_Info *server,
 /* one of these for every pending CIFS request to the server */
 struct mid_q_entry {
         struct list_head qhead; /* mids waiting on reply from this server */
+        struct kref refcount;
         struct TCP_Server_Info *server; /* server corresponding to this mid */
         __u64 mid;              /* multiplex id */
         __u32 pid;              /* process id */
@@ -82,6 +82,7 @@ extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
                                         struct TCP_Server_Info *server);
 extern void DeleteMidQEntry(struct mid_q_entry *midEntry);
 extern void cifs_delete_mid(struct mid_q_entry *mid);
+extern void cifs_mid_q_entry_release(struct mid_q_entry *midEntry);
 extern void cifs_wake_up_task(struct mid_q_entry *mid);
 extern int cifs_handle_standard(struct TCP_Server_Info *server,
                                 struct mid_q_entry *mid);
@@ -924,6 +924,7 @@ next_pdu:
                         server->pdu_size = next_offset;
                 }

+                mid_entry = NULL;
                 if (server->ops->is_transform_hdr &&
                     server->ops->receive_transform &&
                     server->ops->is_transform_hdr(buf)) {
@@ -938,8 +939,11 @@ next_pdu:
                                 length = mid_entry->receive(server, mid_entry);
                 }

-                if (length < 0)
+                if (length < 0) {
+                        if (mid_entry)
+                                cifs_mid_q_entry_release(mid_entry);
                         continue;
+                }

                 if (server->large_buf)
                         buf = server->bigbuf;
@@ -956,6 +960,8 @@ next_pdu:

                         if (!mid_entry->multiRsp || mid_entry->multiEnd)
                                 mid_entry->callback(mid_entry);
+
+                        cifs_mid_q_entry_release(mid_entry);
                 } else if (server->ops->is_oplock_break &&
                            server->ops->is_oplock_break(buf, server)) {
                         cifs_dbg(FYI, "Received oplock break\n");
@@ -107,6 +107,7 @@ cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
                 if (compare_mid(mid->mid, buf) &&
                     mid->mid_state == MID_REQUEST_SUBMITTED &&
                     le16_to_cpu(mid->command) == buf->Command) {
+                        kref_get(&mid->refcount);
                         spin_unlock(&GlobalMid_Lock);
                         return mid;
                 }
@@ -203,6 +203,7 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
                 if ((mid->mid == wire_mid) &&
                     (mid->mid_state == MID_REQUEST_SUBMITTED) &&
                     (mid->command == shdr->Command)) {
+                        kref_get(&mid->refcount);
                         spin_unlock(&GlobalMid_Lock);
                         return mid;
                 }
@@ -548,6 +548,7 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr,

         temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
         memset(temp, 0, sizeof(struct mid_q_entry));
+        kref_init(&temp->refcount);
         temp->mid = le64_to_cpu(shdr->MessageId);
         temp->pid = current->pid;
         temp->command = shdr->Command;  /* Always LE */
@@ -61,6 +61,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)

         temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
         memset(temp, 0, sizeof(struct mid_q_entry));
+        kref_init(&temp->refcount);
         temp->mid = get_mid(smb_buffer);
         temp->pid = current->pid;
         temp->command = cpu_to_le16(smb_buffer->Command);
@@ -82,6 +83,21 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
         return temp;
 }

+static void _cifs_mid_q_entry_release(struct kref *refcount)
+{
+        struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
+                                               refcount);
+
+        mempool_free(mid, cifs_mid_poolp);
+}
+
+void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
+{
+        spin_lock(&GlobalMid_Lock);
+        kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
+        spin_unlock(&GlobalMid_Lock);
+}
+
 void
 DeleteMidQEntry(struct mid_q_entry *midEntry)
 {
@@ -110,7 +126,7 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
                 }
         }
 #endif
-        mempool_free(midEntry, cifs_mid_poolp);
+        cifs_mid_q_entry_release(midEntry);
 }

 void