Merge branch 'for-3.4'

Steve French 2012-03-26 21:14:05 -05:00
commit 8f09c3d8db
11 changed files with 1384 additions and 952 deletions

fs/cifs/cifs_debug.c

@ -58,15 +58,16 @@ cifs_dump_mem(char *label, void *data, int length)
}
#ifdef CONFIG_CIFS_DEBUG2
void cifs_dump_detail(struct smb_hdr *smb)
void cifs_dump_detail(void *buf)
{
struct smb_hdr *smb = (struct smb_hdr *)buf;
cERROR(1, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d",
smb->Command, smb->Status.CifsError,
smb->Flags, smb->Flags2, smb->Mid, smb->Pid);
cERROR(1, "smb buf %p len %d", smb, smbCalcSize(smb));
}
void cifs_dump_mids(struct TCP_Server_Info *server)
{
struct list_head *tmp;
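
The signature change above (take a void * and cast to struct smb_hdr * inside) is a pattern this merge applies throughout cifs: it decouples debug and transport entry points from the SMB1 header layout so the same hooks can later serve SMB2 frames. A minimal userspace sketch of the idiom, with a demo struct standing in for struct smb_hdr (names are illustration-only):

#include <stdio.h>
#include <stdint.h>

struct demo_hdr {			/* stand-in for struct smb_hdr */
	uint8_t command;
	uint16_t mid;
};

static void demo_dump_detail(void *buf)
{
	/* callers pass an untyped buffer; the cast happens here */
	struct demo_hdr *hdr = (struct demo_hdr *)buf;

	printf("Cmd: %d Mid: %d\n", hdr->command, hdr->mid);
}
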
@ -79,15 +80,15 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
spin_lock(&GlobalMid_Lock);
list_for_each(tmp, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
cERROR(1, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %d",
mid_entry->midState,
(int)mid_entry->command,
cERROR(1, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %llu",
mid_entry->mid_state,
le16_to_cpu(mid_entry->command),
mid_entry->pid,
mid_entry->callback_data,
mid_entry->mid);
#ifdef CONFIG_CIFS_STATS2
cERROR(1, "IsLarge: %d buf: %p time rcv: %ld now: %ld",
mid_entry->largeBuf,
mid_entry->large_buf,
mid_entry->resp_buf,
mid_entry->when_received,
jiffies);
@ -217,12 +218,12 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
mid_entry = list_entry(tmp3, struct mid_q_entry,
qhead);
seq_printf(m, "\tState: %d com: %d pid:"
" %d cbdata: %p mid %d\n",
mid_entry->midState,
(int)mid_entry->command,
mid_entry->pid,
mid_entry->callback_data,
mid_entry->mid);
" %d cbdata: %p mid %llu\n",
mid_entry->mid_state,
le16_to_cpu(mid_entry->command),
mid_entry->pid,
mid_entry->callback_data,
mid_entry->mid);
}
spin_unlock(&GlobalMid_Lock);
}
@ -417,7 +418,6 @@ static const struct file_operations cifs_stats_proc_fops = {
static struct proc_dir_entry *proc_fs_cifs;
static const struct file_operations cifsFYI_proc_fops;
static const struct file_operations cifs_oplock_proc_fops;
static const struct file_operations cifs_lookup_cache_proc_fops;
static const struct file_operations traceSMB_proc_fops;
static const struct file_operations cifs_multiuser_mount_proc_fops;
@ -438,7 +438,6 @@ cifs_proc_init(void)
#endif /* STATS */
proc_create("cifsFYI", 0, proc_fs_cifs, &cifsFYI_proc_fops);
proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops);
proc_create("OplockEnabled", 0, proc_fs_cifs, &cifs_oplock_proc_fops);
proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs,
&cifs_linux_ext_proc_fops);
proc_create("MultiuserMount", 0, proc_fs_cifs,
@ -462,7 +461,6 @@ cifs_proc_clean(void)
remove_proc_entry("Stats", proc_fs_cifs);
#endif
remove_proc_entry("MultiuserMount", proc_fs_cifs);
remove_proc_entry("OplockEnabled", proc_fs_cifs);
remove_proc_entry("SecurityFlags", proc_fs_cifs);
remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
@ -508,46 +506,6 @@ static const struct file_operations cifsFYI_proc_fops = {
.write = cifsFYI_proc_write,
};
static int cifs_oplock_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "%d\n", enable_oplocks);
return 0;
}
static int cifs_oplock_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, cifs_oplock_proc_show, NULL);
}
static ssize_t cifs_oplock_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *ppos)
{
char c;
int rc;
printk(KERN_WARNING "CIFS: The /proc/fs/cifs/OplockEnabled interface "
"will be removed in kernel version 3.4. Please migrate to "
"using the 'enable_oplocks' module parameter in cifs.ko.\n");
rc = get_user(c, buffer);
if (rc)
return rc;
if (c == '0' || c == 'n' || c == 'N')
enable_oplocks = false;
else if (c == '1' || c == 'y' || c == 'Y')
enable_oplocks = true;
return count;
}
static const struct file_operations cifs_oplock_proc_fops = {
.owner = THIS_MODULE,
.open = cifs_oplock_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = cifs_oplock_proc_write,
};
static int cifs_linux_ext_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "%d\n", linuxExtEnabled);

fs/cifs/cifs_debug.h

@ -26,13 +26,13 @@
void cifs_dump_mem(char *label, void *data, int length);
#ifdef CONFIG_CIFS_DEBUG2
#define DBG2 2
void cifs_dump_detail(struct smb_hdr *);
void cifs_dump_detail(void *);
void cifs_dump_mids(struct TCP_Server_Info *);
#else
#define DBG2 0
#endif
extern int traceSMB; /* flag which enables the function below */
void dump_smb(struct smb_hdr *, int);
void dump_smb(void *, int);
#define CIFS_INFO 0x01
#define CIFS_RC 0x02
#define CIFS_TIMER 0x04

fs/cifs/cifsfs.c

@ -85,6 +85,8 @@ extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;
struct workqueue_struct *cifsiod_wq;
static int
cifs_read_super(struct super_block *sb)
{
@ -1111,9 +1113,15 @@ init_cifs(void)
cFYI(1, "cifs_max_pending set to max of %u", CIFS_MAX_REQ);
}
cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
if (!cifsiod_wq) {
rc = -ENOMEM;
goto out_clean_proc;
}
rc = cifs_fscache_register();
if (rc)
goto out_clean_proc;
goto out_destroy_wq;
rc = cifs_init_inodecache();
if (rc)
@ -1161,6 +1169,8 @@ out_destroy_inodecache:
cifs_destroy_inodecache();
out_unreg_fscache:
cifs_fscache_unregister();
out_destroy_wq:
destroy_workqueue(cifsiod_wq);
out_clean_proc:
cifs_proc_clean();
return rc;
@ -1183,6 +1193,7 @@ exit_cifs(void)
cifs_destroy_mids();
cifs_destroy_inodecache();
cifs_fscache_unregister();
destroy_workqueue(cifsiod_wq);
cifs_proc_clean();
}

fs/cifs/cifsglob.h

@ -230,6 +230,12 @@ struct cifs_mnt_data {
int flags;
};
static inline unsigned int
get_rfc1002_length(void *buf)
{
return be32_to_cpu(*((__be32 *)buf));
}
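
SMB over TCP prefixes every frame with a 4-byte big-endian RFC 1002 length field, which is what this new helper reads from an untyped buffer. A hedged userspace equivalent, with ntohl() playing the role of be32_to_cpu() and a memcpy to sidestep unaligned access (demo names only):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

static unsigned int demo_rfc1002_length(const void *buf)
{
	uint32_t be_len;

	memcpy(&be_len, buf, sizeof(be_len));	/* buf may be unaligned */
	return ntohl(be_len);			/* big-endian -> host order */
}
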
struct TCP_Server_Info {
struct list_head tcp_ses_list;
struct list_head smb_ses_list;
@ -276,7 +282,7 @@ struct TCP_Server_Info {
vcnumbers */
int capabilities; /* allow selective disabling of caps by smb sess */
int timeAdj; /* Adjust for difference in server time zone in sec */
__u16 CurrentMid; /* multiplex id - rotating counter */
__u64 CurrentMid; /* multiplex id - rotating counter */
char cryptkey[CIFS_CRYPTO_KEY_SIZE]; /* used by ntlm, ntlmv2 etc */
/* 16th byte of RFC1001 workstation name is always null */
char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
@ -335,6 +341,18 @@ has_credits(struct TCP_Server_Info *server, int *credits)
return num > 0;
}
static inline size_t
header_size(void)
{
return sizeof(struct smb_hdr);
}
static inline size_t
max_header_size(void)
{
return MAX_CIFS_HDR_SIZE;
}
/*
* Macros to allow the TCP_Server_Info->net field and related code to drop out
* when CONFIG_NET_NS isn't set.
@ -583,9 +601,11 @@ struct cifs_io_parms {
* Take a reference on the file private data. Must be called with
* cifs_file_list_lock held.
*/
static inline void cifsFileInfo_get(struct cifsFileInfo *cifs_file)
static inline
struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
++cifs_file->count;
return cifs_file;
}
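
Returning the argument from the get-reference helper lets callers take a reference and assign in one expression; the uncached write path added later in this merge does exactly that (wdata->cfile = cifsFileInfo_get(open_file)). A compact sketch of the idiom with demo names:

struct demo_ref {
	int count;			/* protected by a lock in real code */
};

static inline struct demo_ref *demo_ref_get(struct demo_ref *r)
{
	++r->count;
	return r;			/* enables: x->obj = demo_ref_get(obj); */
}
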
void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
@ -606,7 +626,7 @@ struct cifsInodeInfo {
bool delete_pending; /* DELETE_ON_CLOSE is set */
bool invalid_mapping; /* pagecache is invalid */
unsigned long time; /* jiffies of last update of inode */
u64 server_eof; /* current file size on server */
u64 server_eof; /* current file size on server -- protected by i_lock */
u64 uniqueid; /* server inode number */
u64 createtime; /* creation time on server */
#ifdef CONFIG_CIFS_FSCACHE
@ -713,8 +733,8 @@ typedef void (mid_callback_t)(struct mid_q_entry *mid);
/* one of these for every pending CIFS request to the server */
struct mid_q_entry {
struct list_head qhead; /* mids waiting on reply from this server */
__u16 mid; /* multiplex id */
__u16 pid; /* process id */
__u64 mid; /* multiplex id */
__u32 pid; /* process id */
__u32 sequence_number; /* for CIFS signing */
unsigned long when_alloc; /* when mid was created */
#ifdef CONFIG_CIFS_STATS2
@ -724,10 +744,10 @@ struct mid_q_entry {
mid_receive_t *receive; /* call receive callback */
mid_callback_t *callback; /* call completion callback */
void *callback_data; /* general purpose pointer for callback */
struct smb_hdr *resp_buf; /* pointer to received SMB header */
int midState; /* wish this were enum but can not pass to wait_event */
__u8 command; /* smb command code */
bool largeBuf:1; /* if valid response, is pointer to large buf */
void *resp_buf; /* pointer to received SMB header */
int mid_state; /* wish this were enum but can not pass to wait_event */
__le16 command; /* smb command code */
bool large_buf:1; /* if valid response, is pointer to large buf */
bool multiRsp:1; /* multiple trans2 responses for one request */
bool multiEnd:1; /* both received */
};
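
Widening mid to __u64 (and pid to __u32) matches the SMB2 wire format, where MessageId is a 64-bit field; the demultiplex code simply compares the stored value against the incoming frame. A sketch of that lookup over a singly linked pending list, using demo types rather than the kernel's list_head:

#include <stdint.h>
#include <stddef.h>

struct demo_mid {
	struct demo_mid *next;
	uint64_t mid;			/* 64-bit to cover SMB2 MessageId */
};

static struct demo_mid *demo_find_mid(struct demo_mid *head, uint64_t mid)
{
	for (; head != NULL; head = head->next)
		if (head->mid == mid)
			return head;
	return NULL;
}
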
@ -1052,5 +1072,6 @@ GLOBAL_EXTERN spinlock_t gidsidlock;
void cifs_oplock_break(struct work_struct *work);
extern const struct slow_work_ops cifs_oplock_break_ops;
extern struct workqueue_struct *cifsiod_wq;
#endif /* _CIFS_GLOB_H */

fs/cifs/cifsproto.h

@ -77,7 +77,7 @@ extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
struct smb_hdr * /* out */ ,
int * /* bytes returned */ , const int long_op);
extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
struct smb_hdr *in_buf, int flags);
char *in_buf, int flags);
extern int cifs_check_receive(struct mid_q_entry *mid,
struct TCP_Server_Info *server, bool log_error);
extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
@ -91,9 +91,8 @@ extern int SendReceiveBlockingLock(const unsigned int xid,
extern void cifs_add_credits(struct TCP_Server_Info *server,
const unsigned int add);
extern void cifs_set_credits(struct TCP_Server_Info *server, const int val);
extern int checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length);
extern bool is_valid_oplock_break(struct smb_hdr *smb,
struct TCP_Server_Info *);
extern int checkSMB(char *buf, unsigned int length);
extern bool is_valid_oplock_break(char *, struct TCP_Server_Info *);
extern bool backup_cred(struct cifs_sb_info *);
extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
@ -107,7 +106,7 @@ extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len);
extern int cifs_set_port(struct sockaddr *addr, const unsigned short int port);
extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
const unsigned short int port);
extern int map_smb_to_linux_error(struct smb_hdr *smb, bool logErr);
extern int map_smb_to_linux_error(char *buf, bool logErr);
extern void header_assemble(struct smb_hdr *, char /* command */ ,
const struct cifs_tcon *, int /* length of
fixed section (word count) in two byte units */);
@ -116,7 +115,7 @@ extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
void **request_buf);
extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses,
const struct nls_table *nls_cp);
extern __u16 GetNextMid(struct TCP_Server_Info *server);
extern __u64 GetNextMid(struct TCP_Server_Info *server);
extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
extern u64 cifs_UnixTimeToNT(struct timespec);
extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
@ -484,18 +483,25 @@ int cifs_async_readv(struct cifs_readdata *rdata);
/* asynchronous write support */
struct cifs_writedata {
struct kref refcount;
struct list_head list;
struct completion done;
enum writeback_sync_modes sync_mode;
struct work_struct work;
struct cifsFileInfo *cfile;
__u64 offset;
pid_t pid;
unsigned int bytes;
int result;
void (*marshal_iov) (struct kvec *iov,
struct cifs_writedata *wdata);
unsigned int nr_pages;
struct page *pages[1];
};
int cifs_async_writev(struct cifs_writedata *wdata);
struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages);
void cifs_writev_complete(struct work_struct *work);
struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages,
work_func_t complete);
void cifs_writedata_release(struct kref *refcount);
#endif /* _CIFSPROTO_H */

fs/cifs/cifssmb.c

@ -696,7 +696,7 @@ CIFSSMBTDis(const int xid, struct cifs_tcon *tcon)
if (rc)
return rc;
rc = SendReceiveNoRsp(xid, tcon->ses, smb_buffer, 0);
rc = SendReceiveNoRsp(xid, tcon->ses, (char *)smb_buffer, 0);
if (rc)
cFYI(1, "Tree disconnect failed %d", rc);
@ -792,7 +792,7 @@ CIFSSMBLogoff(const int xid, struct cifs_ses *ses)
pSMB->hdr.Uid = ses->Suid;
pSMB->AndXCommand = 0xFF;
rc = SendReceiveNoRsp(xid, ses, (struct smb_hdr *) pSMB, 0);
rc = SendReceiveNoRsp(xid, ses, (char *) pSMB, 0);
session_already_dead:
mutex_unlock(&ses->session_mutex);
@ -1414,8 +1414,7 @@ cifs_readdata_free(struct cifs_readdata *rdata)
static int
cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
READ_RSP *rsp = (READ_RSP *)server->smallbuf;
unsigned int rfclen = be32_to_cpu(rsp->hdr.smb_buf_length);
unsigned int rfclen = get_rfc1002_length(server->smallbuf);
int remaining = rfclen + 4 - server->total_read;
struct cifs_readdata *rdata = mid->callback_data;
@ -1424,7 +1423,7 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
length = cifs_read_from_socket(server, server->bigbuf,
min_t(unsigned int, remaining,
CIFSMaxBufSize + MAX_CIFS_HDR_SIZE));
CIFSMaxBufSize + max_header_size()));
if (length < 0)
return length;
server->total_read += length;
@ -1435,19 +1434,40 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
return 0;
}
static inline size_t
read_rsp_size(void)
{
return sizeof(READ_RSP);
}
static inline unsigned int
read_data_offset(char *buf)
{
READ_RSP *rsp = (READ_RSP *)buf;
return le16_to_cpu(rsp->DataOffset);
}
static inline unsigned int
read_data_length(char *buf)
{
READ_RSP *rsp = (READ_RSP *)buf;
return (le16_to_cpu(rsp->DataLengthHigh) << 16) +
le16_to_cpu(rsp->DataLength);
}
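
read_data_length() above recombines the split DataLengthHigh/DataLength fields of a READ response into one 32-bit count. A userspace sketch of the same arithmetic, with glibc's le16toh() standing in for le16_to_cpu() (demo names only):

#include <stdint.h>
#include <endian.h>

static unsigned int demo_read_data_length(uint16_t high_le, uint16_t low_le)
{
	/* high halfword shifted up, low halfword added in */
	return ((unsigned int)le16toh(high_le) << 16) + le16toh(low_le);
}
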
static int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
int length, len;
unsigned int data_offset, remaining, data_len;
struct cifs_readdata *rdata = mid->callback_data;
READ_RSP *rsp = (READ_RSP *)server->smallbuf;
unsigned int rfclen = be32_to_cpu(rsp->hdr.smb_buf_length) + 4;
char *buf = server->smallbuf;
unsigned int buflen = get_rfc1002_length(buf) + 4;
u64 eof;
pgoff_t eof_index;
struct page *page, *tpage;
cFYI(1, "%s: mid=%u offset=%llu bytes=%u", __func__,
cFYI(1, "%s: mid=%llu offset=%llu bytes=%u", __func__,
mid->mid, rdata->offset, rdata->bytes);
/*
@ -1455,10 +1475,9 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
* can if there's not enough data. At this point, we've read down to
* the Mid.
*/
len = min_t(unsigned int, rfclen, sizeof(*rsp)) -
sizeof(struct smb_hdr) + 1;
len = min_t(unsigned int, buflen, read_rsp_size()) - header_size() + 1;
rdata->iov[0].iov_base = server->smallbuf + sizeof(struct smb_hdr) - 1;
rdata->iov[0].iov_base = buf + header_size() - 1;
rdata->iov[0].iov_len = len;
length = cifs_readv_from_socket(server, rdata->iov, 1, len);
@ -1467,7 +1486,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
server->total_read += length;
/* Was the SMB read successful? */
rdata->result = map_smb_to_linux_error(&rsp->hdr, false);
rdata->result = map_smb_to_linux_error(buf, false);
if (rdata->result != 0) {
cFYI(1, "%s: server returned error %d", __func__,
rdata->result);
@ -1475,14 +1494,14 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
}
/* Is there enough to get to the rest of the READ_RSP header? */
if (server->total_read < sizeof(READ_RSP)) {
if (server->total_read < read_rsp_size()) {
cFYI(1, "%s: server returned short header. got=%u expected=%zu",
__func__, server->total_read, sizeof(READ_RSP));
__func__, server->total_read, read_rsp_size());
rdata->result = -EIO;
return cifs_readv_discard(server, mid);
}
data_offset = le16_to_cpu(rsp->DataOffset) + 4;
data_offset = read_data_offset(buf) + 4;
if (data_offset < server->total_read) {
/*
* win2k8 sometimes sends an offset of 0 when the read
@ -1506,7 +1525,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
len = data_offset - server->total_read;
if (len > 0) {
/* read any junk before data into the rest of smallbuf */
rdata->iov[0].iov_base = server->smallbuf + server->total_read;
rdata->iov[0].iov_base = buf + server->total_read;
rdata->iov[0].iov_len = len;
length = cifs_readv_from_socket(server, rdata->iov, 1, len);
if (length < 0)
@ -1515,15 +1534,14 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
}
/* set up first iov for signature check */
rdata->iov[0].iov_base = server->smallbuf;
rdata->iov[0].iov_base = buf;
rdata->iov[0].iov_len = server->total_read;
cFYI(1, "0: iov_base=%p iov_len=%zu",
rdata->iov[0].iov_base, rdata->iov[0].iov_len);
/* how much data is in the response? */
data_len = le16_to_cpu(rsp->DataLengthHigh) << 16;
data_len += le16_to_cpu(rsp->DataLength);
if (data_offset + data_len > rfclen) {
data_len = read_data_length(buf);
if (data_offset + data_len > buflen) {
/* data_len is corrupt -- discard frame */
rdata->result = -EIO;
return cifs_readv_discard(server, mid);
@ -1602,11 +1620,11 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
rdata->bytes = length;
cFYI(1, "total_read=%u rfclen=%u remaining=%u", server->total_read,
rfclen, remaining);
cFYI(1, "total_read=%u buflen=%u remaining=%u", server->total_read,
buflen, remaining);
/* discard anything left over */
if (server->total_read < rfclen)
if (server->total_read < buflen)
return cifs_readv_discard(server, mid);
dequeue_mid(mid, false);
@ -1647,10 +1665,10 @@ cifs_readv_callback(struct mid_q_entry *mid)
struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
cFYI(1, "%s: mid=%u state=%d result=%d bytes=%u", __func__,
mid->mid, mid->midState, rdata->result, rdata->bytes);
cFYI(1, "%s: mid=%llu state=%d result=%d bytes=%u", __func__,
mid->mid, mid->mid_state, rdata->result, rdata->bytes);
switch (mid->midState) {
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
/* result already set, check signature */
if (server->sec_mode &
@ -1671,7 +1689,7 @@ cifs_readv_callback(struct mid_q_entry *mid)
rdata->result = -EIO;
}
queue_work(system_nrt_wq, &rdata->work);
queue_work(cifsiod_wq, &rdata->work);
DeleteMidQEntry(mid);
cifs_add_credits(server, 1);
}
@ -2017,7 +2035,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
kref_put(&wdata->refcount, cifs_writedata_release);
}
static void
void
cifs_writev_complete(struct work_struct *work)
{
struct cifs_writedata *wdata = container_of(work,
@ -2026,7 +2044,9 @@ cifs_writev_complete(struct work_struct *work)
int i = 0;
if (wdata->result == 0) {
spin_lock(&inode->i_lock);
cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes);
spin_unlock(&inode->i_lock);
cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink),
wdata->bytes);
} else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN)
@ -2047,7 +2067,7 @@ cifs_writev_complete(struct work_struct *work)
}
struct cifs_writedata *
cifs_writedata_alloc(unsigned int nr_pages)
cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete)
{
struct cifs_writedata *wdata;
@ -2061,14 +2081,16 @@ cifs_writedata_alloc(unsigned int nr_pages)
wdata = kzalloc(sizeof(*wdata) +
sizeof(struct page *) * (nr_pages - 1), GFP_NOFS);
if (wdata != NULL) {
INIT_WORK(&wdata->work, cifs_writev_complete);
kref_init(&wdata->refcount);
INIT_LIST_HEAD(&wdata->list);
init_completion(&wdata->done);
INIT_WORK(&wdata->work, complete);
}
return wdata;
}
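
cifs_writedata_alloc() sizes a single allocation for the header plus the page-pointer array using the pre-C99 trailing-array idiom: pages[1] already accounts for one slot, hence the nr_pages - 1 in the size. A sketch of the same arithmetic with plain libc (assumes nr_pages >= 1, as the callers guarantee):

#include <stdlib.h>

struct demo_wdata {
	unsigned int nr_pages;
	void *pages[1];			/* really nr_pages slots */
};

static struct demo_wdata *demo_wdata_alloc(unsigned int nr_pages)
{
	struct demo_wdata *w;

	w = calloc(1, sizeof(*w) + sizeof(void *) * (nr_pages - 1));
	if (w != NULL)
		w->nr_pages = nr_pages;
	return w;
}
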
/*
* Check the midState and signature on received buffer (if any), and queue the
* Check the mid_state and signature on received buffer (if any), and queue the
* workqueue completion task.
*/
static void
@ -2079,7 +2101,7 @@ cifs_writev_callback(struct mid_q_entry *mid)
unsigned int written;
WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf;
switch (mid->midState) {
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
wdata->result = cifs_check_receive(mid, tcon->ses->server, 0);
if (wdata->result != 0)
@ -2111,7 +2133,7 @@ cifs_writev_callback(struct mid_q_entry *mid)
break;
}
queue_work(system_nrt_wq, &wdata->work);
queue_work(cifsiod_wq, &wdata->work);
DeleteMidQEntry(mid);
cifs_add_credits(tcon->ses->server, 1);
}
@ -2124,7 +2146,6 @@ cifs_async_writev(struct cifs_writedata *wdata)
WRITE_REQ *smb = NULL;
int wct;
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
struct inode *inode = wdata->cfile->dentry->d_inode;
struct kvec *iov = NULL;
if (tcon->ses->capabilities & CAP_LARGE_FILES) {
@ -2148,8 +2169,8 @@ cifs_async_writev(struct cifs_writedata *wdata)
goto async_writev_out;
}
smb->hdr.Pid = cpu_to_le16((__u16)wdata->cfile->pid);
smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->cfile->pid >> 16));
smb->hdr.Pid = cpu_to_le16((__u16)wdata->pid);
smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->pid >> 16));
smb->AndXCommand = 0xFF; /* none */
smb->Fid = wdata->cfile->netfid;
@ -2167,15 +2188,13 @@ cifs_async_writev(struct cifs_writedata *wdata)
iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4 + 1;
iov[0].iov_base = smb;
/* marshal up the pages into iov array */
wdata->bytes = 0;
for (i = 0; i < wdata->nr_pages; i++) {
iov[i + 1].iov_len = min(inode->i_size -
page_offset(wdata->pages[i]),
(loff_t)PAGE_CACHE_SIZE);
iov[i + 1].iov_base = kmap(wdata->pages[i]);
wdata->bytes += iov[i + 1].iov_len;
}
/*
* This function should marshal up the page array into the kvec
* array, reserving [0] for the header. It should kmap the pages
* and set the iov_len properly for each one. It may also set
* wdata->bytes too.
*/
wdata->marshal_iov(iov, wdata);
cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);
@ -2420,8 +2439,7 @@ CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
(struct smb_hdr *) pSMB, &bytes_returned);
cifs_small_buf_release(pSMB);
} else {
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *)pSMB,
timeout);
rc = SendReceiveNoRsp(xid, tcon->ses, (char *)pSMB, timeout);
/* SMB buffer freed by function above */
}
cifs_stats_inc(&tcon->num_locks);
@ -2588,7 +2606,7 @@ CIFSSMBClose(const int xid, struct cifs_tcon *tcon, int smb_file_id)
pSMB->FileID = (__u16) smb_file_id;
pSMB->LastWriteTime = 0xFFFFFFFF;
pSMB->ByteCount = 0;
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
cifs_stats_inc(&tcon->num_closes);
if (rc) {
if (rc != -EINTR) {
@ -2617,7 +2635,7 @@ CIFSSMBFlush(const int xid, struct cifs_tcon *tcon, int smb_file_id)
pSMB->FileID = (__u16) smb_file_id;
pSMB->ByteCount = 0;
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
cifs_stats_inc(&tcon->num_flushes);
if (rc)
cERROR(1, "Send error in Flush = %d", rc);
@ -4625,7 +4643,7 @@ CIFSFindClose(const int xid, struct cifs_tcon *tcon,
pSMB->FileID = searchHandle;
pSMB->ByteCount = 0;
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
if (rc)
cERROR(1, "Send error in FindClose = %d", rc);
@ -5646,7 +5664,7 @@ CIFSSMBSetFileSize(const int xid, struct cifs_tcon *tcon, __u64 size,
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
if (rc) {
cFYI(1, "Send error in SetFileInfo (SetFileSize) = %d", rc);
}
@ -5715,7 +5733,7 @@ CIFSSMBSetFileInfo(const int xid, struct cifs_tcon *tcon,
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
memcpy(data_offset, data, sizeof(FILE_BASIC_INFO));
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
if (rc)
cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc);
@ -5774,7 +5792,7 @@ CIFSSMBSetFileDisposition(const int xid, struct cifs_tcon *tcon,
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
*data_offset = delete_file ? 1 : 0;
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
if (rc)
cFYI(1, "Send error in SetFileDisposition = %d", rc);
@ -6006,7 +6024,7 @@ CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon,
cifs_fill_unix_set_info(data_offset, args);
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
if (rc)
cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc);

fs/cifs/connect.c (diff suppressed because it is too large)

fs/cifs/file.c

@ -1399,7 +1399,10 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
return rc;
}
/* update the file size (if needed) after a write */
/*
* update the file size (if needed) after a write. Should be called with
* the inode->i_lock held
*/
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
unsigned int bytes_written)
@ -1471,7 +1474,9 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
return rc;
}
} else {
spin_lock(&dentry->d_inode->i_lock);
cifs_update_eof(cifsi, *poffset, bytes_written);
spin_unlock(&dentry->d_inode->i_lock);
*poffset += bytes_written;
}
}
@ -1648,6 +1653,27 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
return rc;
}
/*
* Marshal up the iov array, reserving the first one for the header. Also,
* set wdata->bytes.
*/
static void
cifs_writepages_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
{
int i;
struct inode *inode = wdata->cfile->dentry->d_inode;
loff_t size = i_size_read(inode);
/* marshal up the pages into iov array */
wdata->bytes = 0;
for (i = 0; i < wdata->nr_pages; i++) {
iov[i + 1].iov_len = min(size - page_offset(wdata->pages[i]),
(loff_t)PAGE_CACHE_SIZE);
iov[i + 1].iov_base = kmap(wdata->pages[i]);
wdata->bytes += iov[i + 1].iov_len;
}
}
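
The min() in the loop above only bites on the last page of the file: every page before EOF contributes a full page to the iov, and the final one is trimmed to the bytes that are actually valid. A standalone arithmetic demo of that clamp (hypothetical sizes):

#include <stdio.h>

int main(void)
{
	long long eof = 10000, page_size = 4096, off;

	for (off = 8192; off < eof; off += page_size) {
		long long len = eof - off < page_size ? eof - off : page_size;
		printf("page at %lld -> iov_len %lld\n", off, len);
	}
	return 0;		/* prints: page at 8192 -> iov_len 1808 */
}
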
static int cifs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
@ -1684,7 +1710,8 @@ retry:
tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
end - index) + 1;
wdata = cifs_writedata_alloc((unsigned int)tofind);
wdata = cifs_writedata_alloc((unsigned int)tofind,
cifs_writev_complete);
if (!wdata) {
rc = -ENOMEM;
break;
@ -1791,6 +1818,7 @@ retry:
wdata->sync_mode = wbc->sync_mode;
wdata->nr_pages = nr_pages;
wdata->offset = page_offset(wdata->pages[0]);
wdata->marshal_iov = cifs_writepages_marshal_iov;
do {
if (wdata->cfile != NULL)
@ -1802,6 +1830,7 @@ retry:
rc = -EBADF;
break;
}
wdata->pid = wdata->cfile->pid;
rc = cifs_async_writev(wdata);
} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
@ -2043,7 +2072,7 @@ cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
unsigned long i;
for (i = 0; i < num_pages; i++) {
pages[i] = alloc_page(__GFP_HIGHMEM);
pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
if (!pages[i]) {
/*
* save number of pages we have already allocated and
@ -2051,15 +2080,14 @@ cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
*/
num_pages = i;
rc = -ENOMEM;
goto error;
break;
}
}
return rc;
error:
for (i = 0; i < num_pages; i++)
put_page(pages[i]);
if (rc) {
for (i = 0; i < num_pages; i++)
put_page(pages[i]);
}
return rc;
}
@ -2070,9 +2098,7 @@ size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
size_t clen;
clen = min_t(const size_t, len, wsize);
num_pages = clen / PAGE_CACHE_SIZE;
if (clen % PAGE_CACHE_SIZE)
num_pages++;
num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
if (cur_len)
*cur_len = clen;
@ -2080,24 +2106,79 @@ size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
return num_pages;
}
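
The rewrite above replaces the manual divide-plus-remainder check with ceiling division; in the kernel, DIV_ROUND_UP(n, d) is just (((n) + (d) - 1) / (d)). For example, a 10000-byte write with 4096-byte pages needs 3 pages:

#define DEMO_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
/* DEMO_DIV_ROUND_UP(10000, 4096) == 3 */
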
static void
cifs_uncached_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
{
int i;
size_t bytes = wdata->bytes;
/* marshal up the pages into iov array */
for (i = 0; i < wdata->nr_pages; i++) {
iov[i + 1].iov_len = min_t(size_t, bytes, PAGE_SIZE);
iov[i + 1].iov_base = kmap(wdata->pages[i]);
bytes -= iov[i + 1].iov_len;
}
}
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
int i;
struct cifs_writedata *wdata = container_of(work,
struct cifs_writedata, work);
struct inode *inode = wdata->cfile->dentry->d_inode;
struct cifsInodeInfo *cifsi = CIFS_I(inode);
spin_lock(&inode->i_lock);
cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
if (cifsi->server_eof > inode->i_size)
i_size_write(inode, cifsi->server_eof);
spin_unlock(&inode->i_lock);
complete(&wdata->done);
if (wdata->result != -EAGAIN) {
for (i = 0; i < wdata->nr_pages; i++)
put_page(wdata->pages[i]);
}
kref_put(&wdata->refcount, cifs_writedata_release);
}
/* attempt to send write to server, retry on any -EAGAIN errors */
static int
cifs_uncached_retry_writev(struct cifs_writedata *wdata)
{
int rc;
do {
if (wdata->cfile->invalidHandle) {
rc = cifs_reopen_file(wdata->cfile, false);
if (rc != 0)
continue;
}
rc = cifs_async_writev(wdata);
} while (rc == -EAGAIN);
return rc;
}
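
The helper loops until the send stops failing with -EAGAIN, first reopening the file handle if it has gone stale; note that continue in a do-while jumps to the condition check, so a hard reopen failure exits the loop with that rc. A sketch of the same control flow, with callbacks standing in for the cifs calls (demo names, and the stale flag is cleared here for illustration, where cifs_reopen_file() does it internally):

#include <errno.h>

static int demo_retry_writev(int *invalid_handle,
			     int (*reopen)(void), int (*send)(void))
{
	int rc;

	do {
		if (*invalid_handle) {
			rc = reopen();
			if (rc != 0)
				continue;	/* exits unless rc == -EAGAIN */
			*invalid_handle = 0;
		}
		rc = send();
	} while (rc == -EAGAIN);
	return rc;
}
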
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
unsigned long nr_segs, loff_t *poffset)
{
unsigned int written;
unsigned long num_pages, npages, i;
unsigned long nr_pages, i;
size_t copied, len, cur_len;
ssize_t total_written = 0;
struct kvec *to_send;
struct page **pages;
loff_t offset = *poffset;
struct iov_iter it;
struct inode *inode;
struct cifsFileInfo *open_file;
struct cifs_tcon *pTcon;
struct cifs_tcon *tcon;
struct cifs_sb_info *cifs_sb;
struct cifs_io_parms io_parms;
int xid, rc;
__u32 pid;
struct cifs_writedata *wdata, *tmp;
struct list_head wdata_list;
int rc;
pid_t pid;
len = iov_length(iov, nr_segs);
if (!len)
@ -2107,103 +2188,103 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
if (rc)
return rc;
INIT_LIST_HEAD(&wdata_list);
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
pages = kmalloc(sizeof(struct pages *)*num_pages, GFP_KERNEL);
if (!pages)
return -ENOMEM;
to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
if (!to_send) {
kfree(pages);
return -ENOMEM;
}
rc = cifs_write_allocate_pages(pages, num_pages);
if (rc) {
kfree(pages);
kfree(to_send);
return rc;
}
xid = GetXid();
open_file = file->private_data;
tcon = tlink_tcon(open_file->tlink);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid;
else
pid = current->tgid;
pTcon = tlink_tcon(open_file->tlink);
inode = file->f_path.dentry->d_inode;
iov_iter_init(&it, iov, nr_segs, len, 0);
npages = num_pages;
do {
size_t save_len = cur_len;
for (i = 0; i < npages; i++) {
copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
copied = iov_iter_copy_from_user(pages[i], &it, 0,
copied);
cur_len -= copied;
iov_iter_advance(&it, copied);
to_send[i+1].iov_base = kmap(pages[i]);
to_send[i+1].iov_len = copied;
}
size_t save_len;
cur_len = save_len - cur_len;
do {
if (open_file->invalidHandle) {
rc = cifs_reopen_file(open_file, false);
if (rc != 0)
break;
}
io_parms.netfid = open_file->netfid;
io_parms.pid = pid;
io_parms.tcon = pTcon;
io_parms.offset = *poffset;
io_parms.length = cur_len;
rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
npages, 0);
} while (rc == -EAGAIN);
for (i = 0; i < npages; i++)
kunmap(pages[i]);
if (written) {
len -= written;
total_written += written;
cifs_update_eof(CIFS_I(inode), *poffset, written);
*poffset += written;
} else if (rc < 0) {
if (!total_written)
total_written = rc;
nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
wdata = cifs_writedata_alloc(nr_pages,
cifs_uncached_writev_complete);
if (!wdata) {
rc = -ENOMEM;
break;
}
/* get length and number of kvecs of the next write */
npages = get_numpages(cifs_sb->wsize, len, &cur_len);
rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
if (rc) {
kfree(wdata);
break;
}
save_len = cur_len;
for (i = 0; i < nr_pages; i++) {
copied = min_t(const size_t, cur_len, PAGE_SIZE);
copied = iov_iter_copy_from_user(wdata->pages[i], &it,
0, copied);
cur_len -= copied;
iov_iter_advance(&it, copied);
}
cur_len = save_len - cur_len;
wdata->sync_mode = WB_SYNC_ALL;
wdata->nr_pages = nr_pages;
wdata->offset = (__u64)offset;
wdata->cfile = cifsFileInfo_get(open_file);
wdata->pid = pid;
wdata->bytes = cur_len;
wdata->marshal_iov = cifs_uncached_marshal_iov;
rc = cifs_uncached_retry_writev(wdata);
if (rc) {
kref_put(&wdata->refcount, cifs_writedata_release);
break;
}
list_add_tail(&wdata->list, &wdata_list);
offset += cur_len;
len -= cur_len;
} while (len > 0);
if (total_written > 0) {
spin_lock(&inode->i_lock);
if (*poffset > inode->i_size)
i_size_write(inode, *poffset);
spin_unlock(&inode->i_lock);
/*
* If at least one write was successfully sent, then discard any rc
* value from the later writes. If the other write succeeds, then
* we'll end up returning whatever was written. If it fails, then
* we'll get a new rc value from that.
*/
if (!list_empty(&wdata_list))
rc = 0;
/*
* Wait for and collect replies for any successful sends in order of
* increasing offset. Once an error is hit or we get a fatal signal
* while waiting, then return without waiting for any more replies.
*/
restart_loop:
list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
if (!rc) {
/* FIXME: freezable too? */
rc = wait_for_completion_killable(&wdata->done);
if (rc)
rc = -EINTR;
else if (wdata->result)
rc = wdata->result;
else
total_written += wdata->bytes;
/* resend call if it's a retryable error */
if (rc == -EAGAIN) {
rc = cifs_uncached_retry_writev(wdata);
goto restart_loop;
}
}
list_del_init(&wdata->list);
kref_put(&wdata->refcount, cifs_writedata_release);
}
cifs_stats_bytes_written(pTcon, total_written);
mark_inode_dirty_sync(inode);
if (total_written > 0)
*poffset += total_written;
for (i = 0; i < num_pages; i++)
put_page(pages[i]);
kfree(to_send);
kfree(pages);
FreeXid(xid);
return total_written;
cifs_stats_bytes_written(tcon, total_written);
return total_written ? total_written : (ssize_t)rc;
}
ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,

fs/cifs/misc.c

@ -213,55 +213,62 @@ cifs_small_buf_release(void *buf_to_free)
}
/*
Find a free multiplex id (SMB mid). Otherwise there could be
mid collisions which might cause problems, demultiplexing the
wrong response to this request. Multiplex ids could collide if
one of a series requests takes much longer than the others, or
if a very large number of long lived requests (byte range
locks or FindNotify requests) are pending. No more than
64K-1 requests can be outstanding at one time. If no
mids are available, return zero. A future optimization
could make the combination of mids and uid the key we use
to demultiplex on (rather than mid alone).
In addition to the above check, the cifs demultiplex
code already used the command code as a secondary
check of the frame and if signing is negotiated the
response would be discarded if the mid were the same
but the signature was wrong. Since the mid is not put in the
pending queue until later (when it is about to be dispatched)
we do have to limit the number of outstanding requests
to somewhat less than 64K-1 although it is hard to imagine
so many threads being in the vfs at one time.
*/
__u16 GetNextMid(struct TCP_Server_Info *server)
* Find a free multiplex id (SMB mid). Otherwise there could be
* mid collisions which might cause problems, demultiplexing the
* wrong response to this request. Multiplex ids could collide if
* one of a series requests takes much longer than the others, or
* if a very large number of long lived requests (byte range
* locks or FindNotify requests) are pending. No more than
* 64K-1 requests can be outstanding at one time. If no
* mids are available, return zero. A future optimization
* could make the combination of mids and uid the key we use
* to demultiplex on (rather than mid alone).
* In addition to the above check, the cifs demultiplex
* code already used the command code as a secondary
* check of the frame and if signing is negotiated the
* response would be discarded if the mid were the same
* but the signature was wrong. Since the mid is not put in the
* pending queue until later (when it is about to be dispatched)
* we do have to limit the number of outstanding requests
* to somewhat less than 64K-1 although it is hard to imagine
* so many threads being in the vfs at one time.
*/
__u64 GetNextMid(struct TCP_Server_Info *server)
{
__u16 mid = 0;
__u16 last_mid;
__u64 mid = 0;
__u16 last_mid, cur_mid;
bool collision;
spin_lock(&GlobalMid_Lock);
last_mid = server->CurrentMid; /* we do not want to loop forever */
server->CurrentMid++;
/* This nested loop looks more expensive than it is.
In practice the list of pending requests is short,
fewer than 50, and the mids are likely to be unique
on the first pass through the loop unless some request
takes longer than the 64 thousand requests before it
(and it would also have to have been a request that
did not time out) */
while (server->CurrentMid != last_mid) {
/* mid is 16 bit only for CIFS/SMB */
cur_mid = (__u16)((server->CurrentMid) & 0xffff);
/* we do not want to loop forever */
last_mid = cur_mid;
cur_mid++;
/*
* This nested loop looks more expensive than it is.
* In practice the list of pending requests is short,
* fewer than 50, and the mids are likely to be unique
* on the first pass through the loop unless some request
* takes longer than the 64 thousand requests before it
* (and it would also have to have been a request that
* did not time out).
*/
while (cur_mid != last_mid) {
struct mid_q_entry *mid_entry;
unsigned int num_mids;
collision = false;
if (server->CurrentMid == 0)
server->CurrentMid++;
if (cur_mid == 0)
cur_mid++;
num_mids = 0;
list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
++num_mids;
if (mid_entry->mid == server->CurrentMid &&
mid_entry->midState == MID_REQUEST_SUBMITTED) {
if (mid_entry->mid == cur_mid &&
mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
/* This mid is in use, try a different one */
collision = true;
break;
@ -282,10 +289,11 @@ __u16 GetNextMid(struct TCP_Server_Info *server)
server->tcpStatus = CifsNeedReconnect;
if (!collision) {
mid = server->CurrentMid;
mid = (__u64)cur_mid;
server->CurrentMid = mid;
break;
}
server->CurrentMid++;
cur_mid++;
}
spin_unlock(&GlobalMid_Lock);
return mid;
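
CIFS mids remain 16 bits on the wire even though the counter is now carried as __u64, so the allocator masks the stored value down, skips zero (which the function reserves as its "no mid available" return), and wraps. A sketch of just the increment/wrap step; the real function also scans pending_mid_q for collisions under GlobalMid_Lock:

#include <stdint.h>

static uint16_t demo_bump_mid(uint64_t current_mid)
{
	uint16_t cur = (uint16_t)(current_mid & 0xffff);

	cur++;
	if (cur == 0)		/* mid 0 is reserved as a failure marker */
		cur++;
	return cur;
}
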
@ -420,8 +428,10 @@ check_smb_hdr(struct smb_hdr *smb, __u16 mid)
}
int
checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int total_read)
checkSMB(char *buf, unsigned int total_read)
{
struct smb_hdr *smb = (struct smb_hdr *)buf;
__u16 mid = smb->Mid;
__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
__u32 clc_len; /* calculated length */
cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x",
@ -502,8 +512,9 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int total_read)
}
bool
is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
struct smb_hdr *buf = (struct smb_hdr *)buffer;
struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
struct list_head *tmp, *tmp1, *tmp2;
struct cifs_ses *ses;
@ -584,7 +595,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
cifs_set_oplock_level(pCifsInode,
pSMB->OplockLevel ? OPLOCK_READ : 0);
queue_work(system_nrt_wq,
queue_work(cifsiod_wq,
&netfile->oplock_break);
netfile->oplock_break_cancelled = false;
@ -604,16 +615,15 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
}
void
dump_smb(struct smb_hdr *smb_buf, int smb_buf_length)
dump_smb(void *buf, int smb_buf_length)
{
int i, j;
char debug_line[17];
unsigned char *buffer;
unsigned char *buffer = buf;
if (traceSMB == 0)
return;
buffer = (unsigned char *) smb_buf;
for (i = 0, j = 0; i < smb_buf_length; i++, j++) {
if (i % 8 == 0) {
/* have reached the beginning of line */

fs/cifs/netmisc.c

@ -836,8 +836,9 @@ ntstatus_to_dos(__u32 ntstatus, __u8 *eclass, __u16 *ecode)
}
int
map_smb_to_linux_error(struct smb_hdr *smb, bool logErr)
map_smb_to_linux_error(char *buf, bool logErr)
{
struct smb_hdr *smb = (struct smb_hdr *)buf;
unsigned int i;
int rc = -EIO; /* if transport error smb error may not be set */
__u8 smberrclass;

fs/cifs/transport.c

@ -60,8 +60,8 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
memset(temp, 0, sizeof(struct mid_q_entry));
temp->mid = smb_buffer->Mid; /* always LE */
temp->pid = current->pid;
temp->command = smb_buffer->Command;
cFYI(1, "For smb_command %d", temp->command);
temp->command = cpu_to_le16(smb_buffer->Command);
cFYI(1, "For smb_command %d", smb_buffer->Command);
/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
/* when mid allocated can be before when sent */
temp->when_alloc = jiffies;
@ -75,7 +75,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
}
atomic_inc(&midCount);
temp->midState = MID_REQUEST_ALLOCATED;
temp->mid_state = MID_REQUEST_ALLOCATED;
return temp;
}
@ -85,9 +85,9 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
#ifdef CONFIG_CIFS_STATS2
unsigned long now;
#endif
midEntry->midState = MID_FREE;
midEntry->mid_state = MID_FREE;
atomic_dec(&midCount);
if (midEntry->largeBuf)
if (midEntry->large_buf)
cifs_buf_release(midEntry->resp_buf);
else
cifs_small_buf_release(midEntry->resp_buf);
@ -97,8 +97,8 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
something is wrong, unless it is quite a slow link or server */
if ((now - midEntry->when_alloc) > HZ) {
if ((cifsFYI & CIFS_TIMER) &&
(midEntry->command != SMB_COM_LOCKING_ANDX)) {
printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %d",
(midEntry->command != cpu_to_le16(SMB_COM_LOCKING_ANDX))) {
printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %llu",
midEntry->command, midEntry->mid);
printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
now - midEntry->when_alloc,
@ -126,11 +126,11 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
int rc = 0;
int i = 0;
struct msghdr smb_msg;
struct smb_hdr *smb_buffer = iov[0].iov_base;
__be32 *buf_len = (__be32 *)(iov[0].iov_base);
unsigned int len = iov[0].iov_len;
unsigned int total_len;
int first_vec = 0;
unsigned int smb_buf_length = be32_to_cpu(smb_buffer->smb_buf_length);
unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
struct socket *ssocket = server->ssocket;
if (ssocket == NULL)
@ -150,7 +150,7 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
total_len += iov[i].iov_len;
cFYI(1, "Sending smb: total_len %d", total_len);
dump_smb(smb_buffer, len);
dump_smb(iov[0].iov_base, len);
i = 0;
while (total_len) {
@ -158,24 +158,24 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
n_vec - first_vec, total_len);
if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
i++;
/* if blocking send we try 3 times, since each can block
for 5 seconds. For nonblocking we have to try more
but wait increasing amounts of time allowing time for
socket to clear. The overall time we wait in either
case to send on the socket is about 15 seconds.
Similarly we wait for 15 seconds for
a response from the server in SendReceive[2]
for the server to send a response back for
most types of requests (except SMB Write
past end of file which can be slow, and
blocking lock operations). NFS waits slightly longer
than CIFS, but this can make it take longer for
nonresponsive servers to be detected and 15 seconds
is more than enough time for modern networks to
send a packet. In most cases if we fail to send
after the retries we will kill the socket and
reconnect which may clear the network problem.
*/
/*
* If blocking send we try 3 times, since each can block
* for 5 seconds. For nonblocking we have to try more
* but wait increasing amounts of time allowing time for
* socket to clear. The overall time we wait in either
* case to send on the socket is about 15 seconds.
* Similarly we wait for 15 seconds for a response from
* the server in SendReceive[2] for the server to send
* a response back for most types of requests (except
* SMB Write past end of file which can be slow, and
* blocking lock operations). NFS waits slightly longer
* than CIFS, but this can make it take longer for
* nonresponsive servers to be detected and 15 seconds
* is more than enough time for modern networks to
* send a packet. In most cases if we fail to send
* after the retries we will kill the socket and
* reconnect which may clear the network problem.
*/
if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
cERROR(1, "sends on sock %p stuck for 15 seconds",
ssocket);
@ -235,9 +235,8 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
else
rc = 0;
/* Don't want to modify the buffer as a
side effect of this call. */
smb_buffer->smb_buf_length = cpu_to_be32(smb_buf_length);
/* Don't want to modify the buffer as a side effect of this call. */
*buf_len = cpu_to_be32(smb_buf_length);
return rc;
}
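
Per the "Don't want to modify the buffer as a side effect" comment above, smb_sendv() now treats the first iov as untyped: it takes a host-order copy of the RFC 1002 length for its own use and writes the big-endian on-wire value back through buf_len before returning, leaving the caller's frame unchanged. A userspace model of that save-and-restore (demo names; htonl()/ntohl() in place of the kernel byte-order helpers):

#include <stdint.h>
#include <arpa/inet.h>

static int demo_send(uint32_t *rfc1002_field)
{
	unsigned int len = ntohl(*rfc1002_field);	/* host-order copy */

	/* ... use len while sending the frame ... */

	*rfc1002_field = htonl(len);	/* restore the on-wire form */
	return 0;
}
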
@ -342,13 +341,40 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
int error;
error = wait_event_freezekillable(server->response_q,
midQ->midState != MID_REQUEST_SUBMITTED);
midQ->mid_state != MID_REQUEST_SUBMITTED);
if (error < 0)
return -ERESTARTSYS;
return 0;
}
static int
cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
unsigned int nvec, struct mid_q_entry **ret_mid)
{
int rc;
struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
struct mid_q_entry *mid;
/* enable signing if server requires it */
if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
mid = AllocMidQEntry(hdr, server);
if (mid == NULL)
return -ENOMEM;
/* put it on the pending_mid_q */
spin_lock(&GlobalMid_Lock);
list_add_tail(&mid->qhead, &server->pending_mid_q);
spin_unlock(&GlobalMid_Lock);
rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
if (rc)
delete_mid(mid);
*ret_mid = mid;
return rc;
}
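
Factoring the mid allocation, queueing, and signing into cifs_setup_async_request() lets the async path share its setup with the synchronous cifs_setup_request() added further down in this file. A sketch of the shape with libc stand-ins; the kernel version also publishes the entry on pending_mid_q under GlobalMid_Lock, and its error path differs slightly in that it still stores the (deleted) mid pointer:

#include <stdlib.h>

struct demo_mid_entry {
	unsigned long long mid;
};

static int demo_setup_request(struct demo_mid_entry **ret,
			      int (*sign)(void))
{
	struct demo_mid_entry *mid = calloc(1, sizeof(*mid));
	int rc;

	if (mid == NULL)
		return -1;		/* -ENOMEM in the kernel */
	/* ... add to the pending queue under a lock ... */
	rc = sign();
	if (rc != 0) {
		free(mid);		/* delete_mid() in the kernel */
		return rc;
	}
	*ret = mid;
	return 0;
}
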
/*
* Send a SMB request and set the callback function in the mid to handle
@ -361,40 +387,24 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
{
int rc;
struct mid_q_entry *mid;
struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0);
if (rc)
return rc;
/* enable signing if server requires it */
if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
mutex_lock(&server->srv_mutex);
mid = AllocMidQEntry(hdr, server);
if (mid == NULL) {
rc = cifs_setup_async_request(server, iov, nvec, &mid);
if (rc) {
mutex_unlock(&server->srv_mutex);
cifs_add_credits(server, 1);
wake_up(&server->request_q);
return -ENOMEM;
}
/* put it on the pending_mid_q */
spin_lock(&GlobalMid_Lock);
list_add_tail(&mid->qhead, &server->pending_mid_q);
spin_unlock(&GlobalMid_Lock);
rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
if (rc) {
mutex_unlock(&server->srv_mutex);
goto out_err;
return rc;
}
mid->receive = receive;
mid->callback = callback;
mid->callback_data = cbdata;
mid->midState = MID_REQUEST_SUBMITTED;
mid->mid_state = MID_REQUEST_SUBMITTED;
cifs_in_send_inc(server);
rc = smb_sendv(server, iov, nvec);
@ -424,14 +434,14 @@ out_err:
*/
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
struct smb_hdr *in_buf, int flags)
char *in_buf, int flags)
{
int rc;
struct kvec iov[1];
int resp_buf_type;
iov[0].iov_base = (char *)in_buf;
iov[0].iov_len = be32_to_cpu(in_buf->smb_buf_length) + 4;
iov[0].iov_base = in_buf;
iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
flags |= CIFS_NO_RESP;
rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);
@ -444,11 +454,11 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
int rc = 0;
cFYI(1, "%s: cmd=%d mid=%d state=%d", __func__, mid->command,
mid->mid, mid->midState);
cFYI(1, "%s: cmd=%d mid=%llu state=%d", __func__,
le16_to_cpu(mid->command), mid->mid, mid->mid_state);
spin_lock(&GlobalMid_Lock);
switch (mid->midState) {
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
spin_unlock(&GlobalMid_Lock);
return rc;
@ -463,8 +473,8 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
break;
default:
list_del_init(&mid->qhead);
cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__,
mid->mid, mid->midState);
cERROR(1, "%s: invalid mid state mid=%llu state=%d", __func__,
mid->mid, mid->mid_state);
rc = -EIO;
}
spin_unlock(&GlobalMid_Lock);
@ -514,7 +524,7 @@ int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
bool log_error)
{
unsigned int len = be32_to_cpu(mid->resp_buf->smb_buf_length) + 4;
unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
dump_smb(mid->resp_buf, min_t(u32, 92, len));
@ -534,6 +544,24 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
return map_smb_to_linux_error(mid->resp_buf, log_error);
}
static int
cifs_setup_request(struct cifs_ses *ses, struct kvec *iov,
unsigned int nvec, struct mid_q_entry **ret_mid)
{
int rc;
struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
struct mid_q_entry *mid;
rc = allocate_mid(ses, hdr, &mid);
if (rc)
return rc;
rc = cifs_sign_smb2(iov, nvec, ses->server, &mid->sequence_number);
if (rc)
delete_mid(mid);
*ret_mid = mid;
return rc;
}
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
@ -542,55 +570,53 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
int rc = 0;
int long_op;
struct mid_q_entry *midQ;
struct smb_hdr *in_buf = iov[0].iov_base;
char *buf = iov[0].iov_base;
long_op = flags & CIFS_TIMEOUT_MASK;
*pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */
if ((ses == NULL) || (ses->server == NULL)) {
cifs_small_buf_release(in_buf);
cifs_small_buf_release(buf);
cERROR(1, "Null session");
return -EIO;
}
if (ses->server->tcpStatus == CifsExiting) {
cifs_small_buf_release(in_buf);
cifs_small_buf_release(buf);
return -ENOENT;
}
/* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or
use ses->maxReq */
/*
* Ensure that we do not send more than 50 overlapping requests
* to the same server. We may make this configurable later or
* use ses->maxReq.
*/
rc = wait_for_free_request(ses->server, long_op);
if (rc) {
cifs_small_buf_release(in_buf);
cifs_small_buf_release(buf);
return rc;
}
/* make sure that we sign in the same order that we send on this socket
and avoid races inside tcp sendmsg code that could cause corruption
of smb data */
/*
* Make sure that we sign in the same order that we send on this socket
* and avoid races inside tcp sendmsg code that could cause corruption
* of smb data.
*/
mutex_lock(&ses->server->srv_mutex);
rc = allocate_mid(ses, in_buf, &midQ);
rc = cifs_setup_request(ses, iov, n_vec, &midQ);
if (rc) {
mutex_unlock(&ses->server->srv_mutex);
cifs_small_buf_release(in_buf);
cifs_small_buf_release(buf);
/* Update # of requests on wire to server */
cifs_add_credits(ses->server, 1);
return rc;
}
rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
if (rc) {
mutex_unlock(&ses->server->srv_mutex);
cifs_small_buf_release(in_buf);
goto out;
}
midQ->midState = MID_REQUEST_SUBMITTED;
midQ->mid_state = MID_REQUEST_SUBMITTED;
cifs_in_send_inc(ses->server);
rc = smb_sendv(ses->server, iov, n_vec);
cifs_in_send_dec(ses->server);
@ -599,30 +625,30 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
mutex_unlock(&ses->server->srv_mutex);
if (rc < 0) {
cifs_small_buf_release(in_buf);
cifs_small_buf_release(buf);
goto out;
}
if (long_op == CIFS_ASYNC_OP) {
cifs_small_buf_release(in_buf);
cifs_small_buf_release(buf);
goto out;
}
rc = wait_for_response(ses->server, midQ);
if (rc != 0) {
send_nt_cancel(ses->server, in_buf, midQ);
send_nt_cancel(ses->server, (struct smb_hdr *)buf, midQ);
spin_lock(&GlobalMid_Lock);
if (midQ->midState == MID_REQUEST_SUBMITTED) {
if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
cifs_small_buf_release(in_buf);
cifs_small_buf_release(buf);
cifs_add_credits(ses->server, 1);
return rc;
}
spin_unlock(&GlobalMid_Lock);
}
cifs_small_buf_release(in_buf);
cifs_small_buf_release(buf);
rc = cifs_sync_mid_result(midQ, ses->server);
if (rc != 0) {
@ -630,15 +656,16 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
return rc;
}
if (!midQ->resp_buf || midQ->midState != MID_RESPONSE_RECEIVED) {
if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
rc = -EIO;
cFYI(1, "Bad MID state?");
goto out;
}
iov[0].iov_base = (char *)midQ->resp_buf;
iov[0].iov_len = be32_to_cpu(midQ->resp_buf->smb_buf_length) + 4;
if (midQ->largeBuf)
buf = (char *)midQ->resp_buf;
iov[0].iov_base = buf;
iov[0].iov_len = get_rfc1002_length(buf) + 4;
if (midQ->large_buf)
*pRespBufType = CIFS_LARGE_BUFFER;
else
*pRespBufType = CIFS_SMALL_BUFFER;
@ -710,7 +737,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
goto out;
}
midQ->midState = MID_REQUEST_SUBMITTED;
midQ->mid_state = MID_REQUEST_SUBMITTED;
cifs_in_send_inc(ses->server);
rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
@ -728,7 +755,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
if (rc != 0) {
send_nt_cancel(ses->server, in_buf, midQ);
spin_lock(&GlobalMid_Lock);
if (midQ->midState == MID_REQUEST_SUBMITTED) {
if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
/* no longer considered to be "in-flight" */
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
@ -745,13 +772,13 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
}
if (!midQ->resp_buf || !out_buf ||
midQ->midState != MID_RESPONSE_RECEIVED) {
midQ->mid_state != MID_RESPONSE_RECEIVED) {
rc = -EIO;
cERROR(1, "Bad MID state?");
goto out;
}
*pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
rc = cifs_check_receive(midQ, ses->server, 0);
out:
@ -844,7 +871,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
return rc;
}
midQ->midState = MID_REQUEST_SUBMITTED;
midQ->mid_state = MID_REQUEST_SUBMITTED;
cifs_in_send_inc(ses->server);
rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
cifs_in_send_dec(ses->server);
@ -858,13 +885,13 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
/* Wait for a reply - allow signals to interrupt. */
rc = wait_event_interruptible(ses->server->response_q,
(!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
((ses->server->tcpStatus != CifsGood) &&
(ses->server->tcpStatus != CifsNew)));
/* Were we interrupted by a signal ? */
if ((rc == -ERESTARTSYS) &&
(midQ->midState == MID_REQUEST_SUBMITTED) &&
(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
((ses->server->tcpStatus == CifsGood) ||
(ses->server->tcpStatus == CifsNew))) {
@ -894,7 +921,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
if (rc) {
send_nt_cancel(ses->server, in_buf, midQ);
spin_lock(&GlobalMid_Lock);
if (midQ->midState == MID_REQUEST_SUBMITTED) {
if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
/* no longer considered to be "in-flight" */
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
@ -912,13 +939,13 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
return rc;
/* rcvd frame is ok */
if (out_buf == NULL || midQ->midState != MID_RESPONSE_RECEIVED) {
if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
rc = -EIO;
cERROR(1, "Bad MID state?");
goto out;
}
*pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
rc = cifs_check_receive(midQ, ses->server, 0);
out: