linux/fs/cifs/file.c

// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"
#include "fs_context.h"
#include "cifs_ioctl.h"
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}
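
/*
 * Translate the VFS open flags into the SMB_O_* flags carried by the
 * POSIX create call (see cifs_posix_open() below).  Note that O_EXCL
 * without O_CREAT is ignored with a debug message, and O_DSYNC is
 * conservatively mapped to SMB_O_SYNC.
 */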
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
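
/*
 * Choose the SMB create disposition from the VFS open flags.  As a
 * quick reference, the combinations below map as follows:
 *
 *	O_CREAT | O_EXCL	-> FILE_CREATE		(fail if it exists)
 *	O_CREAT | O_TRUNC	-> FILE_OVERWRITE_IF	(create or truncate)
 *	O_CREAT			-> FILE_OPEN_IF		(create or open)
 *	O_TRUNC			-> FILE_OVERWRITE	(truncate existing)
 *	none of the above	-> FILE_OPEN		(open existing)
 */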
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}
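
/*
 * cifs_posix_open - open or create a file via the CIFS POSIX extensions
 *
 * Brief parameter notes, inferred from the usage below:
 * @full_path:	path of the file relative to the share root
 * @pinode:	optional in/out inode; a new inode is obtained via cifs_iget()
 *		when *pinode is NULL and the server returned valid info
 * @sb:		superblock of the mount
 * @mode:	create mode, masked with the caller's umask
 * @f_flags:	VFS open flags, converted with cifs_posix_convert_flags()
 * @poplock:	returned oplock level
 * @pnetfid:	returned file id (netfid) of the opened handle
 * @xid:	operation id used for this request
 */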
int cifs_posix_open(const char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
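
/*
 * Open a file over the standard (non-POSIX) SMB path: convert the VFS
 * flags to desired access, disposition and create options, issue the
 * open through server->ops->open(), then fill in inode metadata for
 * the new handle (closing it again if that last step fails).
 */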
static int
cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag		CIFS Disposition
 *	----------		----------------
 *	O_CREAT			FILE_OPEN_IF
 *	O_CREAT | O_EXCL	FILE_CREATE
 *	O_CREAT | O_TRUNC	FILE_OVERWRITE_IF
 *	O_TRUNC			FILE_OVERWRITE
 *	none of the above	FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = cifs_create_options(cifs_sb, create_options);
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);
	if (rc)
		goto out;

	/* TODO: Add support for calling posix query info but with passing in fid */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

out:
	kfree(buf);
	return rc;
}
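
/*
 * Return true if any byte-range locks are currently recorded on this
 * inode; walks cinode->llist with lock_sem held for read.
 */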
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}
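
/*
 * Take a lock_sem rw_semaphore for write by polling down_write_trylock()
 * and sleeping 10ms between attempts instead of blocking in the rwsem
 * wait queue.
 */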
void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}

static void cifsFileInfo_put_work(struct work_struct *work);
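
/*
 * Allocate and initialise the cifsFileInfo for a newly opened handle:
 * take references on the dentry and tlink, attach the per-fid lock list
 * to the inode, honour any oplock recorded in the pending_open, and link
 * the file onto the tcon and inode open-file lists (readable handles are
 * put at the head of the inode list).
 */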
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if readable file instance put first in list*/
	spin_lock(&cinode->open_file_lock);
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
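
/*
 * Take an additional reference on an open file handle; the reference is
 * dropped again with cifsFileInfo_put().
 */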
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}
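
/*
 * Final teardown for a cifsFileInfo once its last reference is gone:
 * release the fscache cookie, free any remaining byte-range lock records
 * under lock_sem, then drop the tlink, dentry and superblock references
 * and free the structure.  Called from cifsFileInfo_put_work() below.
 */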
static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;
	struct super_block *sb = inode->i_sb;

	cifs_fscache_release_inode_cookie(inode);

/*
* Delete any outstanding lock records. We'll lose them when the file
* is closed anyway.
*/
cifs_down_write(&cifsi->lock_sem);
list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
list_del(&li->llist);
cifs_del_lock_waiters(li);
kfree(li);
}
list_del(&cifs_file->llist->llist);
kfree(cifs_file->llist);
up_write(&cifsi->lock_sem);
cifs_put_tlink(cifs_file->tlink);
dput(cifs_file->dentry);
cifs_sb_deactive(sb);
kfree(cifs_file);
}
static void cifsFileInfo_put_work(struct work_struct *work)
{
struct cifsFileInfo *cifs_file = container_of(work,
struct cifsFileInfo, put);
cifsFileInfo_put_final(cifs_file);
}
/**
* cifsFileInfo_put - release a reference to file private data
*
* Waits for any running oplock break handler when releasing the last
* reference; see _cifsFileInfo_put().
*
* @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file
*/
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
_cifsFileInfo_put(cifs_file, true, true);
}
/**
* _cifsFileInfo_put - release a reference to file private data
*
* This may involve closing the filehandle @cifs_file out on the
* server. Must be called without holding tcon->open_file_lock,
* cinode->open_file_lock and cifs_file->file_info_lock.
*
* If @wait_oplock_handler is true and we are releasing the last
* reference, wait for any running oplock break handler of the file
* and cancel any pending one.
*
* @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file
* @wait_oplock_handler: must be false if called from oplock_break_handler
* @offload: if true, defer the final part of the put (which needs a write
* lock on lock_sem) to a work queue instead of running it inline
*
*/
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
bool wait_oplock_handler, bool offload)
{
struct inode *inode = d_inode(cifs_file->dentry);
struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct super_block *sb = inode->i_sb;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifs_fid fid;
struct cifs_pending_open open;
bool oplock_break_cancelled;
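/*
* Lock order: tcon->open_file_lock, then cifsi->open_file_lock, then
* cifs_file->file_info_lock; they are dropped in the reverse order
* once the reference count has been checked.
*/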
spin_lock(&tcon->open_file_lock);
spin_lock(&cifsi->open_file_lock);
spin_lock(&cifs_file->file_info_lock);
if (--cifs_file->count > 0) {
spin_unlock(&cifs_file->file_info_lock);
spin_unlock(&cifsi->open_file_lock);
spin_unlock(&tcon->open_file_lock);
return;
}
spin_unlock(&cifs_file->file_info_lock);
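/* Last reference is gone; unhook the handle while the list locks are still held. */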
if (server->ops->get_lease_key)
server->ops->get_lease_key(inode, &fid);
/* store open in pending opens to make sure we don't miss lease break */
cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
/* remove it from the lists */
list_del(&cifs_file->flist);
list_del(&cifs_file->tlist);
atomic_dec(&tcon->num_local_opens);
if (list_empty(&cifsi->openFileList)) {
cifs_dbg(FYI, "closing last open instance for inode %p\n",
d_inode(cifs_file->dentry));
/*
* In strict cache mode we need to invalidate the mapping on the last
* close because it may cause an error when we open this file
* again and get at least a level II oplock.
*/
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
cifs_set_oplock_level(cifsi, 0);
}
spin_unlock(&cifsi->open_file_lock);
spin_unlock(&tcon->open_file_lock);
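/*
* Wait for and cancel any queued oplock break work for this handle,
* unless the caller is the oplock break handler itself (which passes
* wait_oplock_handler == false).
*/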
oplock_break_cancelled = wait_oplock_handler ?
cancel_work_sync(&cifs_file->oplock_break) : false;
if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
struct TCP_Server_Info *server = tcon->ses->server;
unsigned int xid;
xid = get_xid();
if (server->ops->close_getattr)
server->ops->close_getattr(xid, tcon, cifs_file);
else if (server->ops->close)
server->ops->close(xid, tcon, &cifs_file->fid);
_free_xid(xid);
}
if (oplock_break_cancelled)
cifs_done_oplock_break(cifsi);
cifs_del_pending_open(&open);
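/*
* The remaining teardown needs a write lock on cifsi->lock_sem, so when
* offloading it runs from fileinfo_put_wq with no other locks held (see
* the work-queue commit message above).
*/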
if (offload)
queue_work(fileinfo_put_wq, &cifs_file->put);
else
cifsFileInfo_put_final(cifs_file);
}
int cifs_open(struct inode *inode, struct file *file)
{
int rc = -EACCES;
unsigned int xid;
__u32 oplock;
struct cifs_sb_info *cifs_sb;
struct TCP_Server_Info *server;
struct cifs_tcon *tcon;
struct tcon_link *tlink;
struct cifsFileInfo *cfile = NULL;
cifs: allocate buffer in the caller of build_path_from_dentry()

build_path_from_dentry() open-codes dentry_path_raw(). The reason we can't use dentry_path_raw() in there (and postprocess the result as needed) is that the callers of build_path_from_dentry() expect that the object to be freed on cleanup and the string to be used are at the same address.

That's painful, since the path is naturally built end-to-beginning - we start at the leaf and go through the ancestors, accumulating the pathname.

Life would be easier if we left the buffer allocation to callers. It wouldn't be exact-sized buffer, but none of the callers keep the result for long - it's always freed before the caller returns. So there's no need to do exact-sized allocation; better use __getname()/__putname(), same as we do for pathname arguments of syscalls. What's more, there's no need to do allocation under spinlocks, so GFP_ATOMIC is not needed.

Next patch will replace the open-coded dentry_path_raw() (in build_path_from_dentry_optional_prefix()) with calling the real thing.

This patch only introduces wrappers for allocating/freeing the buffers and switches to new calling conventions: build_path_from_dentry(dentry, buf) expects buf to be address of a page-sized object or NULL, return value is a pathname built inside that buffer on success, ERR_PTR(-ENOMEM) if buf is NULL and ERR_PTR(-ENAMETOOLONG) if the pathname won't fit into page.

Note that we don't need to check for failure when allocating the buffer in the caller - build_path_from_dentry() will do the right thing.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Steve French <stfrench@microsoft.com>
2021-03-05 22:36:04 +00:00
void *page;
const char *full_path;
bool posix_open_ok = false;
struct cifs_fid fid;
struct cifs_pending_open open;
xid = get_xid();
cifs_sb = CIFS_SB(inode->i_sb);
if (unlikely(cifs_forced_shutdown(cifs_sb))) {
free_xid(xid);
return -EIO;
}
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink)) {
free_xid(xid);
return PTR_ERR(tlink);
}
tcon = tlink_tcon(tlink);
server = tcon->ses->server;
page = alloc_dentry_path();
full_path = build_path_from_dentry(file_dentry(file), page);
if (IS_ERR(full_path)) {
rc = PTR_ERR(full_path);
goto out;
}
cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
inode, file->f_flags, full_path);
if (file->f_flags & O_DIRECT &&
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
file->f_op = &cifs_file_direct_nobrl_ops;
else
file->f_op = &cifs_file_direct_ops;
}
/* Get the cached handle as SMB2 close is deferred */
rc = cifs_get_readable_path(tcon, full_path, &cfile);
if (rc == 0) {
if (file->f_flags == cfile->f_flags) {
file->private_data = cfile;
spin_lock(&CIFS_I(inode)->deferred_lock);
cifs_del_deferred_close(cfile);
spin_unlock(&CIFS_I(inode)->deferred_lock);
goto out;
} else {
_cifsFileInfo_put(cfile, true, false);
}
}
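/* No matching cached handle; fall through to a full open, requesting an oplock when the server supports them. */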
if (server->oplocks)
oplock = REQ_OPLOCK;
else
oplock = 0;
if (!tcon->broken_posix_open && tcon->unix_ext &&
cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
/* can not refresh inode info since size could be stale */
rc = cifs_posix_open(full_path, &inode, inode->i_sb,
cifs_sb->ctx->file_mode /* ignored */,
file->f_flags, &oplock, &fid.netfid, xid);
if (rc == 0) {
cifs_dbg(FYI, "posix open succeeded\n");
posix_open_ok = true;
} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
if (tcon->ses->serverNOS)
cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
tcon->ses->ip_addr,
tcon->ses->serverNOS);
tcon->broken_posix_open = true;
} else if ((rc != -EIO) && (rc != -EREMOTE) &&
(rc != -EOPNOTSUPP)) /* path not found or net err */
goto out;
/*
* Else fall through and retry the open the old way on network i/o
* or DFS errors.
*/
}
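/*
* Record this open as pending so that a lease break arriving before the
* cifsFileInfo is fully set up is not missed.
*/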
if (server->ops->get_lease_key)
server->ops->get_lease_key(inode, &fid);
cifs_add_pending_open(&fid, tlink, &open);
if (!posix_open_ok) {
if (server->ops->get_lease_key)
server->ops->get_lease_key(inode, &fid);
rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
file->f_flags, &oplock, &fid, xid);
if (rc) {
cifs_del_pending_open(&open);
goto out;
}
}
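/*
* Wrap the new handle in a cifsFileInfo; if that allocation fails, close
* the handle on the server and drop the pending open before bailing out.
*/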
cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
if (cfile == NULL) {
if (server->ops->close)
server->ops->close(xid, tcon, &fid);
cifs_del_pending_open(&open);
rc = -ENOMEM;
goto out;
}
cifs_fscache_set_inode_cookie(inode, file);
if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
/*
* Time to set mode which we can not set earlier due to
* problems creating new read-only files.
*/
struct cifs_unix_set_info_args args = {
.mode = inode->i_mode,
.uid = INVALID_UID, /* no change */
.gid = INVALID_GID, /* no change */
.ctime = NO_CHANGE_64,
.atime = NO_CHANGE_64,
.mtime = NO_CHANGE_64,
.device = 0,
};
CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
cfile->pid);
}
out:
free_dentry_path(page);
free_xid(xid);
cifs_put_tlink(tlink);
return rc;
}
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
/*
* Try to reacquire byte range locks that were released when session
* to server was lost.
*/
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
int rc = 0;
CIFS: silence lockdep splat in cifs_relock_file()

cifs_relock_file() can perform a down_write() on the inode's lock_sem even though it was already performed in cifs_strict_readv(). Lockdep complains about this. AFAICS, there is no problem here, and lockdep just needs to be told that this nesting is OK.

=============================================
[ INFO: possible recursive locking detected ]
4.11.0+ #20 Not tainted
---------------------------------------------
cat/701 is trying to acquire lock:
 (&cifsi->lock_sem){++++.+}, at: cifs_reopen_file+0x7a7/0xc00

but task is already holding lock:
 (&cifsi->lock_sem){++++.+}, at: cifs_strict_readv+0x177/0x310

other info that might help us debug this:
 Possible unsafe locking scenario:

       CPU0
       ----
  lock(&cifsi->lock_sem);
  lock(&cifsi->lock_sem);

 *** DEADLOCK ***

 May be due to missing lock nesting notation

1 lock held by cat/701:
 #0:  (&cifsi->lock_sem){++++.+}, at: cifs_strict_readv+0x177/0x310

stack backtrace:
CPU: 0 PID: 701 Comm: cat Not tainted 4.11.0+ #20
Call Trace:
 dump_stack+0x85/0xc2
 __lock_acquire+0x17dd/0x2260
 ? trace_hardirqs_on_thunk+0x1a/0x1c
 ? preempt_schedule_irq+0x6b/0x80
 lock_acquire+0xcc/0x260
 ? lock_acquire+0xcc/0x260
 ? cifs_reopen_file+0x7a7/0xc00
 down_read+0x2d/0x70
 ? cifs_reopen_file+0x7a7/0xc00
 cifs_reopen_file+0x7a7/0xc00
 ? printk+0x43/0x4b
 cifs_readpage_worker+0x327/0x8a0
 cifs_readpage+0x8c/0x2a0
 generic_file_read_iter+0x692/0xd00
 cifs_strict_readv+0x29f/0x310
 generic_file_splice_read+0x11c/0x1c0
 do_splice_to+0xa5/0xc0
 splice_direct_to_actor+0xfa/0x350
 ? generic_pipe_buf_nosteal+0x10/0x10
 do_splice_direct+0xb5/0xe0
 do_sendfile+0x278/0x3a0
 SyS_sendfile64+0xc4/0xe0
 entry_SYSCALL_64_fastpath+0x1f/0xbe

Signed-off-by: Rabin Vincent <rabinv@axis.com>
Acked-by: Pavel Shilovsky <pshilov@microsoft.com>
Signed-off-by: Steve French <smfrench@gmail.com>
2017-05-03 15:17:21 +00:00
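/*
* lock_sem may already be held for read further up the call chain (e.g.
* from cifs_strict_readv()), so take it with a lockdep nesting annotation
* to avoid a false-positive recursive locking report (see the lockdep
* commit message above).
*/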
down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
if (cinode->can_cache_brlcks) {
/* can cache locks - no need to relock */
up_read(&cinode->lock_sem);
return rc;
}
if (cap_unix(tcon->ses) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
rc = cifs_push_posix_locks(cfile);
else
rc = tcon->ses->server->ops->push_mand_locks(cfile);
up_read(&cinode->lock_sem);
return rc;
}
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
int rc = -EACCES;
unsigned int xid;
__u32 oplock;
struct cifs_sb_info *cifs_sb;
struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
struct cifsInodeInfo *cinode;
struct inode *inode;
void *page;
const char *full_path;
int desired_access;
int disposition = FILE_OPEN;
int create_options = CREATE_NOT_DIR;
struct cifs_open_parms oparms;
xid = get_xid();
mutex_lock(&cfile->fh_mutex);
if (!cfile->invalidHandle) {
mutex_unlock(&cfile->fh_mutex);
free_xid(xid);
return 0;
}
inode = d_inode(cfile->dentry);
cifs_sb = CIFS_SB(inode->i_sb);
tcon = tlink_tcon(cfile->tlink);
server = tcon->ses->server;
/*
* Cannot grab the rename sem here: various ops, including some that
* already hold it, can end up causing writepage to get called, and if
* the server was down that means we end up here - and we can never tell
* whether the caller already holds rename_sem.
*/
page = alloc_dentry_path();
full_path = build_path_from_dentry(cfile->dentry, page);
if (IS_ERR(full_path)) {
mutex_unlock(&cfile->fh_mutex);
free_dentry_path(page);
free_xid(xid);
return PTR_ERR(full_path);
}
cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
inode, cfile->f_flags, full_path);
if (tcon->ses->server->oplocks)
oplock = REQ_OPLOCK;
else
oplock = 0;
if (tcon->unix_ext && cap_unix(tcon->ses) &&
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
/*
* O_CREAT, O_EXCL and O_TRUNC already had their effect on the
* original open. Must mask them off for a reopen.
*/
unsigned int oflags = cfile->f_flags &
~(O_CREAT | O_EXCL | O_TRUNC);
rc = cifs_posix_open(full_path, NULL, inode->i_sb,
cifs_sb->ctx->file_mode /* ignored */,
oflags, &oplock, &cfile->fid.netfid, xid);
if (rc == 0) {
cifs_dbg(FYI, "posix reopen succeeded\n");
oparms.reconnect = true;
goto reopen_success;
}
/*
* Fall through and retry the open the old way on errors; especially
* in the reconnect path it is important to retry hard.
*/
}
desired_access = cifs_convert_flags(cfile->f_flags);
/* O_SYNC also has bit for O_DSYNC so following check picks up either */
if (cfile->f_flags & O_SYNC)
create_options |= CREATE_WRITE_THROUGH;
if (cfile->f_flags & O_DIRECT)
create_options |= CREATE_NO_BUFFER;
if (server->ops->get_lease_key)
server->ops->get_lease_key(inode, &cfile->fid);
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = desired_access;
oparms.create_options = cifs_create_options(cifs_sb, create_options);
oparms.disposition = disposition;
oparms.path = full_path;
oparms.fid = &cfile->fid;
oparms.reconnect = true;
/*
* Cannot refresh the inode by passing in a file_info buf to be returned
* by ops->open and then calling get_inode_info with the returned buf,
* since the file might have write-behind data that needs to be flushed
* and the server's version of the file size can be stale. If we knew for
* sure that the inode was not dirty locally we could do this.
*/
rc = server->ops->open(xid, &oparms, &oplock, NULL);
if (rc == -ENOENT && oparms.reconnect == false) {
/* durable handle timeout is expired - open the file again */
rc = server->ops->open(xid, &oparms, &oplock, NULL);
/* indicate that we need to relock the file */
oparms.reconnect = true;
}
if (rc) {
mutex_unlock(&cfile->fh_mutex);
cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
cifs_dbg(FYI, "oplock: %d\n", oplock);
goto reopen_error_exit;
}
reopen_success:
cfile->invalidHandle = false;
mutex_unlock(&cfile->fh_mutex);
cinode = CIFS_I(inode);
if (can_flush) {
rc = filemap_write_and_wait(inode->i_mapping);
if (!is_interrupt_error(rc))
mapping_set_error(inode->i_mapping, rc);
if (tcon->posix_extensions)
rc = smb311_posix_get_inode_info(&inode, full_path, inode->i_sb, xid);
else if (tcon->unix_ext)
rc = cifs_get_inode_info_unix(&inode, full_path,
inode->i_sb, xid);
else
rc = cifs_get_inode_info(&inode, full_path, NULL,
inode->i_sb, xid, NULL);
}
/*
* Otherwise we are already writing data out to the server and could
* deadlock if we tried to flush it; and since we do not know whether we
* have data that would invalidate the current end of file on the server,
* we cannot go to the server for new inode info.
*/
/*
* If the server returned a read oplock and we have mandatory brlocks,
* set oplock level to None.
*/
if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
oplock = 0;
}
server->ops->set_fid(cfile, &cfile->fid, oplock);
if (oparms.reconnect)
cifs_relock_file(cfile);
reopen_error_exit:
free_dentry_path(page);
free_xid(xid);
return rc;
}
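/*
* Work handler for a deferred (cached) handle close: remove the deferred
* close record and drop the reference held for the delayed work.
*/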
void smb2_deferred_work_close(struct work_struct *work)
{
struct cifsFileInfo *cfile = container_of(work,
struct cifsFileInfo, deferred.work);
spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
cifs_del_deferred_close(cfile);
cfile->deferred_close_scheduled = false;
spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
_cifsFileInfo_put(cfile, true, false);
}
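/*
* If the inode still holds a read/handle/write caching lease and no
* byte-range lock was taken on it, defer the real close by acregmax so a
* quickly reopened file can reuse the handle; otherwise drop the handle
* reference immediately.
*/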
int cifs_close(struct inode *inode, struct file *file)
{
struct cifsFileInfo *cfile;
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifs_deferred_close *dclose;
if (file->private_data != NULL) {
cfile = file->private_data;
file->private_data = NULL;
dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
cinode->lease_granted &&
!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
dclose) {
if (test_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
inode->i_ctime = inode->i_mtime = current_time(inode);
cifs_fscache_update_inode_cookie(inode);
}
spin_lock(&cinode->deferred_lock);
cifs_add_deferred_close(cfile, dclose);
if (cfile->deferred_close_scheduled &&
delayed_work_pending(&cfile->deferred)) {
Fix KASAN identified use-after-free issue.

[ 612.157429] ==================================================================
[ 612.158275] BUG: KASAN: use-after-free in process_one_work+0x90/0x9b0
[ 612.158801] Read of size 8 at addr ffff88810a31ca60 by task kworker/2:9/2382
[ 612.159611] CPU: 2 PID: 2382 Comm: kworker/2:9 Tainted: G OE 5.13.0-rc2+ #98
[ 612.159623] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-1.fc33 04/01/2014
[ 612.159640] Workqueue: 0x0 (deferredclose)
[ 612.159669] Call Trace:
[ 612.159685] dump_stack+0xbb/0x107
[ 612.159711] print_address_description.constprop.0+0x18/0x140
[ 612.159733] ? process_one_work+0x90/0x9b0
[ 612.159743] ? process_one_work+0x90/0x9b0
[ 612.159754] kasan_report.cold+0x7c/0xd8
[ 612.159778] ? lock_is_held_type+0x80/0x130
[ 612.159789] ? process_one_work+0x90/0x9b0
[ 612.159812] kasan_check_range+0x145/0x1a0
[ 612.159834] process_one_work+0x90/0x9b0
[ 612.159877] ? pwq_dec_nr_in_flight+0x110/0x110
[ 612.159914] ? spin_bug+0x90/0x90
[ 612.159967] worker_thread+0x3b6/0x6c0
[ 612.160023] ? process_one_work+0x9b0/0x9b0
[ 612.160038] kthread+0x1dc/0x200
[ 612.160051] ? kthread_create_worker_on_cpu+0xd0/0xd0
[ 612.160092] ret_from_fork+0x1f/0x30

[ 612.160399] Allocated by task 2358:
[ 612.160757] kasan_save_stack+0x1b/0x40
[ 612.160768] __kasan_kmalloc+0x9b/0xd0
[ 612.160778] cifs_new_fileinfo+0xb0/0x960 [cifs]
[ 612.161170] cifs_open+0xadf/0xf20 [cifs]
[ 612.161421] do_dentry_open+0x2aa/0x6b0
[ 612.161432] path_openat+0xbd9/0xfa0
[ 612.161441] do_filp_open+0x11d/0x230
[ 612.161450] do_sys_openat2+0x115/0x240
[ 612.161460] __x64_sys_openat+0xce/0x140

When mod_delayed_work is called to modify the delay of pending work, it
might return false and queue a new work when pending work is already
scheduled or when try to grab pending work failed. So, Increase the
reference count when new work is scheduled to avoid use-after-free.

Signed-off-by: Rohith Surabattula <rohiths@microsoft.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
2021-05-20 16:45:01 +00:00
/*
* If there was no pending work, mod_delayed_work() queues new work and
* returns false; in that case take an extra reference so the cfile is
* not freed while the deferred close work is pending.
*/
if (!mod_delayed_work(deferredclose_wq,
&cfile->deferred, cifs_sb->ctx->acregmax))
cifsFileInfo_get(cfile);
} else {
/* Deferred close for files */
queue_delayed_work(deferredclose_wq,
&cfile->deferred, cifs_sb->ctx->acregmax);
cfile->deferred_close_scheduled = true;
spin_unlock(&cinode->deferred_lock);
return 0;
}
spin_unlock(&cinode->deferred_lock);
_cifsFileInfo_put(cfile, true, false);
} else {
_cifsFileInfo_put(cfile, true, false);
kfree(dclose);
}
}
/* return code from the ->release op is always ignored */
return 0;
}
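/*
* After a reconnect, reopen every invalidated persistent handle on this
* tree connection; if any reopen fails, need_reopen_files is set again so
* a later call can retry.
*/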
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
struct cifsFileInfo *open_file;
struct list_head *tmp;
struct list_head *tmp1;
struct list_head tmp_list;
if (!tcon->use_persistent || !tcon->need_reopen_files)
return;
tcon->need_reopen_files = false;
cifs_dbg(FYI, "Reopen persistent handles\n");
INIT_LIST_HEAD(&tmp_list);
/* list all files open on tree connection, reopen resilient handles */
spin_lock(&tcon->open_file_lock);
list_for_each(tmp, &tcon->openFileList) {
open_file = list_entry(tmp, struct cifsFileInfo, tlist);
if (!open_file->invalidHandle)
continue;
cifsFileInfo_get(open_file);
list_add_tail(&open_file->rlist, &tmp_list);
}
spin_unlock(&tcon->open_file_lock);
list_for_each_safe(tmp, tmp1, &tmp_list) {
open_file = list_entry(tmp, struct cifsFileInfo, rlist);
if (cifs_reopen_file(open_file, false /* do not flush */))
tcon->need_reopen_files = true;
list_del_init(&open_file->rlist);
cifsFileInfo_put(open_file);
}
}
int cifs_closedir(struct inode *inode, struct file *file)
{
int rc = 0;
unsigned int xid;
struct cifsFileInfo *cfile = file->private_data;
struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
char *buf;
cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
if (cfile == NULL)
return rc;
xid = get_xid();
tcon = tlink_tcon(cfile->tlink);
server = tcon->ses->server;
cifs_dbg(FYI, "Freeing private data in close dir\n");
spin_lock(&cfile->file_info_lock);
if (server->ops->dir_needs_close(cfile)) {
cfile->invalidHandle = true;
spin_unlock(&cfile->file_info_lock);
if (server->ops->close_dir)
rc = server->ops->close_dir(xid, tcon, &cfile->fid);
else
rc = -ENOSYS;
cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
/* not much we can do if it fails anyway, ignore rc */
rc = 0;
} else
spin_unlock(&cfile->file_info_lock);
buf = cfile->srch_inf.ntwrk_buf_start;
if (buf) {
cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
cfile->srch_inf.ntwrk_buf_start = NULL;
if (cfile->srch_inf.smallBuf)
cifs_small_buf_release(buf);
else
cifs_buf_release(buf);
}
cifs_put_tlink(cfile->tlink);
kfree(file->private_data);
file->private_data = NULL;
/* BB can we lock the filestruct while this is going on? */
free_xid(xid);
return rc;
}
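/*
* Allocate and initialize a byte-range lock record for the given range;
* the caller owns the returned cifsLockInfo and must free it.
*/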
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
struct cifsLockInfo *lock =
kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
if (!lock)
return lock;
lock->offset = offset;
lock->length = length;
lock->type = type;
lock->pid = current->tgid;
lock->flags = flags;
INIT_LIST_HEAD(&lock->blist);
init_waitqueue_head(&lock->block_q);
return lock;
}
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
struct cifsLockInfo *li, *tmp;
list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
list_del_init(&li->blist);
wake_up(&li->block_q);
}
}
#define CIFS_LOCK_OP 0
#define CIFS_READ_OP 1
#define CIFS_WRITE_OP 2
/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
__u64 length, __u8 type, __u16 flags,
struct cifsFileInfo *cfile,
struct cifsLockInfo **conf_lock, int rw_check)
{
struct cifsLockInfo *li;
struct cifsFileInfo *cur_cfile = fdlocks->cfile;
struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
list_for_each_entry(li, &fdlocks->locks, llist) {
if (offset + length <= li->offset ||
offset >= li->offset + li->length)
continue;
if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
server->ops->compare_fids(cfile, cur_cfile)) {
/* shared lock prevents write op through the same fid */
if (!(li->type & server->vals->shared_lock_type) ||
rw_check != CIFS_WRITE_OP)
continue;
}
if ((type & server->vals->shared_lock_type) &&
((server->ops->compare_fids(cfile, cur_cfile) &&
current->tgid == li->pid) || type == li->type))
continue;
if (rw_check == CIFS_LOCK_OP &&
(flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
server->ops->compare_fids(cfile, cur_cfile))
continue;
if (conf_lock)
*conf_lock = li;
return true;
}
return false;
}
bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
__u8 type, __u16 flags,
struct cifsLockInfo **conf_lock, int rw_check)
{
bool rc = false;
struct cifs_fid_locks *cur;
struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
list_for_each_entry(cur, &cinode->llist, llist) {
rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
flags, cfile, conf_lock,
rw_check);
if (rc)
break;
}
return rc;
}
/*
* Check if there is another lock that prevents us from setting the lock
* (mandatory style). If such a lock exists, update the flock structure with
* its properties. Otherwise, set the flock type to F_UNLCK if we can cache
* brlocks or leave it the same if we can't. Returns 0 if we don't need to
* send a request to the server or 1 otherwise.
*/
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
__u8 type, struct file_lock *flock)
{
int rc = 0;
struct cifsLockInfo *conf_lock;
struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
bool exist;
down_read(&cinode->lock_sem);
exist = cifs_find_lock_conflict(cfile, offset, length, type,
flock->fl_flags, &conf_lock,
CIFS_LOCK_OP);
if (exist) {
flock->fl_start = conf_lock->offset;
flock->fl_end = conf_lock->offset + conf_lock->length - 1;
flock->fl_pid = conf_lock->pid;
if (conf_lock->type & server->vals->shared_lock_type)
flock->fl_type = F_RDLCK;
else
flock->fl_type = F_WRLCK;
} else if (!cinode->can_cache_brlcks)
rc = 1;
else
flock->fl_type = F_UNLCK;
up_read(&cinode->lock_sem);
return rc;
}
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
cifs_down_write(&cinode->lock_sem);
list_add_tail(&lock->llist, &cfile->llist->locks);
up_write(&cinode->lock_sem);
}
/*
* Set the byte-range lock (mandatory style). Returns:
* 1) 0, if we set the lock and don't need to send a request to the server;
* 2) 1, if no locks prevent us but we need to send a request to the server;
* 3) -EACCES, if there is a lock that prevents us and wait is false.
*/
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
bool wait)
{
struct cifsLockInfo *conf_lock;
struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
bool exist;
int rc = 0;
try_again:
exist = false;
cifs_down_write(&cinode->lock_sem);
exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
lock->type, lock->flags, &conf_lock,
CIFS_LOCK_OP);
if (!exist && cinode->can_cache_brlcks) {
list_add_tail(&lock->llist, &cfile->llist->locks);
up_write(&cinode->lock_sem);
return rc;
}
if (!exist)
rc = 1;
else if (!wait)
rc = -EACCES;
else {
list_add_tail(&lock->blist, &conf_lock->blist);
up_write(&cinode->lock_sem);
rc = wait_event_interruptible(lock->block_q,
(lock->blist.prev == &lock->blist) &&
(lock->blist.next == &lock->blist));
if (!rc)
goto try_again;
cifs_down_write(&cinode->lock_sem);
list_del_init(&lock->blist);
}
up_write(&cinode->lock_sem);
return rc;
}
/*
* Check if there is another lock that prevents us from setting the lock
* (posix style). If such a lock exists, update the flock structure with
* its properties. Otherwise, set the flock type to F_UNLCK if we can cache
* brlocks or leave it the same if we can't. Returns 0 if we don't need to
* send a request to the server or 1 otherwise.
*/
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
int rc = 0;
struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
unsigned char saved_type = flock->fl_type;
if ((flock->fl_flags & FL_POSIX) == 0)
return 1;
down_read(&cinode->lock_sem);
posix_test_lock(file, flock);
if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
flock->fl_type = saved_type;
rc = 1;
}
up_read(&cinode->lock_sem);
return rc;
}
/*
* Set the byte-range lock (posix style). Returns:
* 1) <0, if an error occurs while setting the lock;
* 2) 0, if we set the lock and don't need to send a request to the server;
* 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
* 4) FILE_LOCK_DEFERRED + 1, if we need to send a request to the server.
*/
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
int rc = FILE_LOCK_DEFERRED + 1;
if ((flock->fl_flags & FL_POSIX) == 0)
return rc;
cifs_down_write(&cinode->lock_sem);
if (!cinode->can_cache_brlcks) {
up_write(&cinode->lock_sem);
return rc;
}
rc = posix_lock_file(file, flock, NULL);
up_write(&cinode->lock_sem);
return rc;
}
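/*
* Send all byte-range locks cached on this open file to the server,
* packing as many LOCKING_ANDX ranges per request as the server's maxBuf
* allows.
*/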
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
unsigned int xid;
int rc = 0, stored_rc;
struct cifsLockInfo *li, *tmp;
struct cifs_tcon *tcon;
unsigned int num, max_num, max_buf;
LOCKING_ANDX_RANGE *buf, *cur;
static const int types[] = {
LOCKING_ANDX_LARGE_FILES,
LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
};
int i;
xid = get_xid();
tcon = tlink_tcon(cfile->tlink);
/*
* Accessing maxBuf is racy with cifs_reconnect - need to store value
* and check it before using.
*/
max_buf = tcon->ses->server->maxBuf;
if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
free_xid(xid);
return -EINVAL;
}
BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
PAGE_SIZE);
max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
PAGE_SIZE);
max_num = (max_buf - sizeof(struct smb_hdr)) /
sizeof(LOCKING_ANDX_RANGE);
buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
if (!buf) {
free_xid(xid);
return -ENOMEM;
}
for (i = 0; i < 2; i++) {
cur = buf;
num = 0;
list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
if (li->type != types[i])
continue;
cur->Pid = cpu_to_le16(li->pid);
cur->LengthLow = cpu_to_le32((u32)li->length);
cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
cur->OffsetLow = cpu_to_le32((u32)li->offset);
cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
if (++num == max_num) {
stored_rc = cifs_lockv(xid, tcon,
cfile->fid.netfid,
(__u8)li->type, 0, num,
buf);
if (stored_rc)
rc = stored_rc;
cur = buf;
num = 0;
} else
cur++;
}
if (num) {
stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
(__u8)types[i], 0, num, buf);
if (stored_rc)
rc = stored_rc;
}
}
kfree(buf);
free_xid(xid);
return rc;
}
static __u32
hash_lockowner(fl_owner_t owner)
{
return cifs_lock_secret ^ hash32_ptr((const void *)owner);
}
struct lock_to_push {
struct list_head llist;
__u64 offset;
__u64 length;
__u32 pid;
__u16 netfid;
__u8 type;
};
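/*
* Push the inode's cached POSIX (fcntl) locks to the server. The
* lock_to_push entries are preallocated before flc_lock is taken for the
* walk, since that is a spinlock and we cannot sleep while holding it.
*/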
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
struct inode *inode = d_inode(cfile->dentry);
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct file_lock *flock;
struct file_lock_context *flctx = inode->i_flctx;
unsigned int count = 0, i;
int rc = 0, xid, type;
struct list_head locks_to_send, *el;
struct lock_to_push *lck, *tmp;
__u64 length;
xid = get_xid();
if (!flctx)
goto out;
spin_lock(&flctx->flc_lock);
list_for_each(el, &flctx->flc_posix) {
count++;
}
spin_unlock(&flctx->flc_lock);
INIT_LIST_HEAD(&locks_to_send);
/*
* Allocating count locks is enough because no FL_POSIX locks can be
* added to the list while we are holding cinode->lock_sem that
* protects locking operations of this inode.
*/
for (i = 0; i < count; i++) {
lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
if (!lck) {
rc = -ENOMEM;
goto err_out;
}
list_add_tail(&lck->llist, &locks_to_send);
}
el = locks_to_send.next;
spin_lock(&flctx->flc_lock);
list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
if (el == &locks_to_send) {
/*
* The list ended. We don't have enough allocated
* structures - something is really wrong.
*/
cifs_dbg(VFS, "Can't push all brlocks!\n");
break;
}
length = 1 + flock->fl_end - flock->fl_start;
if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
type = CIFS_RDLCK;
else
type = CIFS_WRLCK;
lck = list_entry(el, struct lock_to_push, llist);
lck->pid = hash_lockowner(flock->fl_owner);
lck->netfid = cfile->fid.netfid;
lck->length = length;
lck->type = type;
lck->offset = flock->fl_start;
}
spin_unlock(&flctx->flc_lock);
list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
int stored_rc;
stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
lck->offset, lck->length, NULL,
lck->type, 0);
if (stored_rc)
rc = stored_rc;
list_del(&lck->llist);
kfree(lck);
}
out:
free_xid(xid);
return rc;
err_out:
list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
list_del(&lck->llist);
kfree(lck);
}
goto out;
}
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
int rc = 0;
/* we are going to update can_cache_brlcks here - need a write access */
cifs_down_write(&cinode->lock_sem);
if (!cinode->can_cache_brlcks) {
up_write(&cinode->lock_sem);
return rc;
}
if (cap_unix(tcon->ses) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
rc = cifs_push_posix_locks(cfile);
else
rc = tcon->ses->server->ops->push_mand_locks(cfile);
cinode->can_cache_brlcks = false;
up_write(&cinode->lock_sem);
return rc;
}
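/*
* Translate the VFS file_lock flags and type into the CIFS lock type and
* lock/unlock/wait indicators used by the callers below.
*/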
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
bool *wait_flag, struct TCP_Server_Info *server)
{
if (flock->fl_flags & FL_POSIX)
cifs_dbg(FYI, "Posix\n");
if (flock->fl_flags & FL_FLOCK)
cifs_dbg(FYI, "Flock\n");
if (flock->fl_flags & FL_SLEEP) {
cifs_dbg(FYI, "Blocking lock\n");
*wait_flag = true;
}
if (flock->fl_flags & FL_ACCESS)
cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
if (flock->fl_flags & FL_LEASE)
cifs_dbg(FYI, "Lease on file - not implemented yet\n");
if (flock->fl_flags &
(~(FL_POSIX | FL_FLOCK | FL_SLEEP |
FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
*type = server->vals->large_lock_type;
if (flock->fl_type == F_WRLCK) {
cifs_dbg(FYI, "F_WRLCK\n");
*type |= server->vals->exclusive_lock_type;
*lock = 1;
} else if (flock->fl_type == F_UNLCK) {
cifs_dbg(FYI, "F_UNLCK\n");
*type |= server->vals->unlock_lock_type;
*unlock = 1;
/* Check if unlock includes more than one lock range */
} else if (flock->fl_type == F_RDLCK) {
cifs_dbg(FYI, "F_RDLCK\n");
*type |= server->vals->shared_lock_type;
*lock = 1;
} else if (flock->fl_type == F_EXLCK) {
cifs_dbg(FYI, "F_EXLCK\n");
*type |= server->vals->exclusive_lock_type;
*lock = 1;
} else if (flock->fl_type == F_SHLCK) {
cifs_dbg(FYI, "F_SHLCK\n");
*type |= server->vals->shared_lock_type;
*lock = 1;
} else
cifs_dbg(FYI, "Unknown type of lock\n");
}
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
bool wait_flag, bool posix_lck, unsigned int xid)
{
int rc = 0;
__u64 length = 1 + flock->fl_end - flock->fl_start;
struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
__u16 netfid = cfile->fid.netfid;
if (posix_lck) {
int posix_lock_type;
rc = cifs_posix_lock_test(file, flock);
if (!rc)
return rc;
if (type & server->vals->shared_lock_type)
posix_lock_type = CIFS_RDLCK;
else
posix_lock_type = CIFS_WRLCK;
rc = CIFSSMBPosixLock(xid, tcon, netfid,
hash_lockowner(flock->fl_owner),
flock->fl_start, length, flock,
posix_lock_type, wait_flag);
return rc;
}
rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
if (!rc)
return rc;
/* BB we could chain these into one lock request BB */
rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1, 0, false);
if (rc == 0) {
rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
type, 0, 1, false);
flock->fl_type = F_UNLCK;
if (rc != 0)
cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
rc);
return 0;
}
if (type & server->vals->shared_lock_type) {
flock->fl_type = F_WRLCK;
return 0;
}
type &= ~server->vals->exclusive_lock_type;
rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
type | server->vals->shared_lock_type,
1, 0, false);
if (rc == 0) {
rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
type | server->vals->shared_lock_type, 0, 1, false);
flock->fl_type = F_RDLCK;
if (rc != 0)
cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
rc);
} else
flock->fl_type = F_WRLCK;
return 0;
}
void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
struct list_head *li, *tmp;
list_for_each_safe(li, tmp, source)
list_move(li, dest);
}
void
cifs_free_llist(struct list_head *llist)
{
struct cifsLockInfo *li, *tmp;
list_for_each_entry_safe(li, tmp, llist, llist) {
cifs_del_lock_waiters(li);
list_del(&li->llist);
kfree(li);
}
}
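/*
* Remove cached byte-range locks that fall entirely inside the unlock
* range, sending batched LOCKING_ANDX unlock requests for locks already
* known to the server; locks are put back on the file's list if a request
* fails.
*/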
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
unsigned int xid)
{
int rc = 0, stored_rc;
static const int types[] = {
LOCKING_ANDX_LARGE_FILES,
LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
};
unsigned int i;
unsigned int max_num, num, max_buf;
LOCKING_ANDX_RANGE *buf, *cur;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
struct cifsLockInfo *li, *tmp;
__u64 length = 1 + flock->fl_end - flock->fl_start;
struct list_head tmp_llist;
INIT_LIST_HEAD(&tmp_llist);
/*
* Accessing maxBuf is racy with cifs_reconnect - need to store value
* and check it before using.
*/
max_buf = tcon->ses->server->maxBuf;
if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
return -EINVAL;
BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
PAGE_SIZE);
max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
PAGE_SIZE);
max_num = (max_buf - sizeof(struct smb_hdr)) /
sizeof(LOCKING_ANDX_RANGE);
buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
if (!buf)
return -ENOMEM;
cifs_down_write(&cinode->lock_sem);
for (i = 0; i < 2; i++) {
cur = buf;
num = 0;
list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
if (flock->fl_start > li->offset ||
(flock->fl_start + length) <
(li->offset + li->length))
continue;
if (current->tgid != li->pid)
continue;
if (types[i] != li->type)
continue;
if (cinode->can_cache_brlcks) {
/*
* We can cache brlock requests - simply remove
* a lock from the file's list.
*/
list_del(&li->llist);
cifs_del_lock_waiters(li);
kfree(li);
continue;
}
cur->Pid = cpu_to_le16(li->pid);
cur->LengthLow = cpu_to_le32((u32)li->length);
cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
cur->OffsetLow = cpu_to_le32((u32)li->offset);
cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
/*
* We need to save a lock here to let us add it again to
* the file's list if the unlock range request fails on
* the server.
*/
list_move(&li->llist, &tmp_llist);
if (++num == max_num) {
stored_rc = cifs_lockv(xid, tcon,
cfile->fid.netfid,
li->type, num, 0, buf);
if (stored_rc) {
/*
* We failed on the unlock range
* request - add all locks from the tmp
* list to the head of the file's list.
*/
cifs_move_llist(&tmp_llist,
&cfile->llist->locks);
rc = stored_rc;
} else
/*
* The unlock range request succeeded -
* free the tmp list.
*/
cifs_free_llist(&tmp_llist);
cur = buf;
num = 0;
} else
cur++;
}
if (num) {
stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
types[i], num, 0, buf);
if (stored_rc) {
cifs_move_llist(&tmp_llist,
&cfile->llist->locks);
rc = stored_rc;
} else
cifs_free_llist(&tmp_llist);
}
}
up_write(&cinode->lock_sem);
kfree(buf);
return rc;
}
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
bool wait_flag, bool posix_lck, int lock, int unlock,
unsigned int xid)
{
int rc = 0;
__u64 length = 1 + flock->fl_end - flock->fl_start;
struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
struct inode *inode = d_inode(cfile->dentry);
if (posix_lck) {
int posix_lock_type;
rc = cifs_posix_lock_set(file, flock);
if (rc <= FILE_LOCK_DEFERRED)
return rc;
if (type & server->vals->shared_lock_type)
posix_lock_type = CIFS_RDLCK;
else
posix_lock_type = CIFS_WRLCK;
if (unlock == 1)
posix_lock_type = CIFS_UNLCK;
rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
hash_lockowner(flock->fl_owner),
flock->fl_start, length,
NULL, posix_lock_type, wait_flag);
goto out;
}
if (lock) {
struct cifsLockInfo *lock;
lock = cifs_lock_init(flock->fl_start, length, type,
flock->fl_flags);
if (!lock)
return -ENOMEM;
rc = cifs_lock_add_if(cfile, lock, wait_flag);
if (rc < 0) {
kfree(lock);
return rc;
}
if (!rc)
goto out;
/*
* Windows 7 server can delay breaking lease from read to None
* if we set a byte-range lock on a file - break it explicitly
* before sending the lock to the server to be sure the next
* read won't conflict with non-overlapping locks due to
* page reading.
*/
if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
CIFS_CACHE_READ(CIFS_I(inode))) {
cifs_zap_mapping(inode);
cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
inode);
CIFS_I(inode)->oplock = 0;
}
rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
type, 1, 0, wait_flag);
if (rc) {
kfree(lock);
return rc;
}
cifs_lock_add(cfile, lock);
} else if (unlock)
rc = server->ops->mand_unlock_range(cfile, flock, xid);
out:
if ((flock->fl_flags & FL_POSIX) || (flock->fl_flags & FL_FLOCK)) {
CIFS: fix POSIX lock leak and invalid ptr deref We have a customer reporting crashes in lock_get_status() with many "Leaked POSIX lock" messages preceeding the crash. Leaked POSIX lock on dev=0x0:0x56 ... Leaked POSIX lock on dev=0x0:0x56 ... Leaked POSIX lock on dev=0x0:0x56 ... Leaked POSIX lock on dev=0x0:0x53 ... Leaked POSIX lock on dev=0x0:0x53 ... Leaked POSIX lock on dev=0x0:0x53 ... Leaked POSIX lock on dev=0x0:0x53 ... POSIX: fl_owner=ffff8900e7b79380 fl_flags=0x1 fl_type=0x1 fl_pid=20709 Leaked POSIX lock on dev=0x0:0x4b ino... Leaked locks on dev=0x0:0x4b ino=0xf911400000029: POSIX: fl_owner=ffff89f41c870e00 fl_flags=0x1 fl_type=0x1 fl_pid=19592 stack segment: 0000 [#1] SMP Modules linked in: binfmt_misc msr tcp_diag udp_diag inet_diag unix_diag af_packet_diag netlink_diag rpcsec_gss_krb5 arc4 ecb auth_rpcgss nfsv4 md4 nfs nls_utf8 lockd grace cifs sunrpc ccm dns_resolver fscache af_packet iscsi_ibft iscsi_boot_sysfs vmw_vsock_vmci_transport vsock xfs libcrc32c sb_edac edac_core crct10dif_pclmul crc32_pclmul ghash_clmulni_intel drbg ansi_cprng vmw_balloon aesni_intel aes_x86_64 lrw gf128mul glue_helper ablk_helper cryptd joydev pcspkr vmxnet3 i2c_piix4 vmw_vmci shpchp fjes processor button ac btrfs xor raid6_pq sr_mod cdrom ata_generic sd_mod ata_piix vmwgfx crc32c_intel drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops ttm serio_raw ahci libahci drm libata vmw_pvscsi sg dm_multipath dm_mod scsi_dh_rdac scsi_dh_emc scsi_dh_alua scsi_mod autofs4 Supported: Yes CPU: 6 PID: 28250 Comm: lsof Not tainted 4.4.156-94.64-default #1 Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 04/05/2016 task: ffff88a345f28740 ti: ffff88c74005c000 task.ti: ffff88c74005c000 RIP: 0010:[<ffffffff8125dcab>] [<ffffffff8125dcab>] lock_get_status+0x9b/0x3b0 RSP: 0018:ffff88c74005fd90 EFLAGS: 00010202 RAX: ffff89bde83e20ae RBX: ffff89e870003d18 RCX: 0000000049534f50 RDX: ffffffff81a3541f RSI: ffffffff81a3544e RDI: ffff89bde83e20ae RBP: 0026252423222120 R08: 0000000020584953 R09: 000000000000ffff R10: 0000000000000000 R11: ffff88c74005fc70 R12: ffff89e5ca7b1340 R13: 00000000000050e5 R14: ffff89e870003d30 R15: ffff89e5ca7b1340 FS: 00007fafd64be800(0000) GS:ffff89f41fd00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000001c80018 CR3: 000000a522048000 CR4: 0000000000360670 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Stack: 0000000000000208 ffffffff81a3d6b6 ffff89e870003d30 ffff89e870003d18 ffff89e5ca7b1340 ffff89f41738d7c0 ffff89e870003d30 ffff89e5ca7b1340 ffffffff8125e08f 0000000000000000 ffff89bc22b67d00 ffff88c74005ff28 Call Trace: [<ffffffff8125e08f>] locks_show+0x2f/0x70 [<ffffffff81230ad1>] seq_read+0x251/0x3a0 [<ffffffff81275bbc>] proc_reg_read+0x3c/0x70 [<ffffffff8120e456>] __vfs_read+0x26/0x140 [<ffffffff8120e9da>] vfs_read+0x7a/0x120 [<ffffffff8120faf2>] SyS_read+0x42/0xa0 [<ffffffff8161cbc3>] entry_SYSCALL_64_fastpath+0x1e/0xb7 When Linux closes a FD (close(), close-on-exec, dup2(), ...) it calls filp_close() which also removes all posix locks. The lock struct is initialized like so in filp_close() and passed down to cifs ... lock.fl_type = F_UNLCK; lock.fl_flags = FL_POSIX | FL_CLOSE; lock.fl_start = 0; lock.fl_end = OFFSET_MAX; ... Note the FL_CLOSE flag, which hints the VFS code that this unlocking is done for closing the fd. 
filp_close() locks_remove_posix(filp, id); vfs_lock_file(filp, F_SETLK, &lock, NULL); return filp->f_op->lock(filp, cmd, fl) => cifs_lock() rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock, xid); rc = server->ops->mand_unlock_range(cfile, flock, xid); if (flock->fl_flags & FL_POSIX && !rc) rc = locks_lock_file_wait(file, flock) Notice how we don't call locks_lock_file_wait() which does the generic VFS lock/unlock/wait work on the inode if rc != 0. If we are closing the handle, the SMB server is supposed to remove any locks associated with it. Similarly, cifs.ko frees and wakes up any lock and lock waiter when closing the file: cifs_close() cifsFileInfo_put(file->private_data) /* * Delete any outstanding lock records. We'll lose them when the file * is closed anyway. */ down_write(&cifsi->lock_sem); list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) { list_del(&li->llist); cifs_del_lock_waiters(li); kfree(li); } list_del(&cifs_file->llist->llist); kfree(cifs_file->llist); up_write(&cifsi->lock_sem); So we can safely ignore unlocking failures in cifs_lock() if they happen with the FL_CLOSE flag hint set as both the server and the client take care of it during the actual closing. This is not a proper fix for the unlocking failure but it's safe and it seems to prevent the lock leakages and crashes the customer experiences. Signed-off-by: Aurelien Aptel <aaptel@suse.com> Signed-off-by: NeilBrown <neil@brown.name> Signed-off-by: Steve French <stfrench@microsoft.com> Acked-by: Pavel Shilovsky <pshilov@microsoft.com>
2019-03-14 17:44:16 +00:00
/*
* If this is a request to remove all locks because we
* are closing the file, it doesn't matter if the
* unlocking failed as both cifs.ko and the SMB server
* remove the lock on file close
*/
if (rc) {
cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
if (!(flock->fl_flags & FL_CLOSE))
return rc;
}
rc = locks_lock_file_wait(file, flock);
}
return rc;
}
int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
{
int rc, xid;
int lock = 0, unlock = 0;
bool wait_flag = false;
bool posix_lck = false;
struct cifs_sb_info *cifs_sb;
struct cifs_tcon *tcon;
struct cifsFileInfo *cfile;
__u32 type;
rc = -EACCES;
xid = get_xid();
if (!(fl->fl_flags & FL_FLOCK))
return -ENOLCK;
cfile = (struct cifsFileInfo *)file->private_data;
tcon = tlink_tcon(cfile->tlink);
cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
tcon->ses->server);
cifs_sb = CIFS_FILE_SB(file);
if (cap_unix(tcon->ses) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
posix_lck = true;
if (!lock && !unlock) {
/*
* If neither a lock nor an unlock was requested there is
* nothing to do, since we do not know what the operation is.
*/
free_xid(xid);
return -EOPNOTSUPP;
}
rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
xid);
free_xid(xid);
return rc;
}
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
int rc, xid;
int lock = 0, unlock = 0;
bool wait_flag = false;
bool posix_lck = false;
struct cifs_sb_info *cifs_sb;
struct cifs_tcon *tcon;
struct cifsFileInfo *cfile;
__u32 type;
rc = -EACCES;
xid = get_xid();
cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
cmd, flock->fl_flags, flock->fl_type,
flock->fl_start, flock->fl_end);
cfile = (struct cifsFileInfo *)file->private_data;
tcon = tlink_tcon(cfile->tlink);
cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
tcon->ses->server);
cifs_sb = CIFS_FILE_SB(file);
set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
if (cap_unix(tcon->ses) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
posix_lck = true;
/*
* BB add code here to normalize offset and length to account for
* negative length which we can not accept over the wire.
*/
if (IS_GETLK(cmd)) {
rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
free_xid(xid);
return rc;
}
if (!lock && !unlock) {
/*
* If neither a lock nor an unlock was requested there is
* nothing to do, since we do not know what the operation is.
*/
free_xid(xid);
return -EOPNOTSUPP;
}
rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
xid);
free_xid(xid);
return rc;
}
/*
* update the file size (if needed) after a write. Should be called with
* the inode->i_lock held
*/
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
unsigned int bytes_written)
{
loff_t end_of_write = offset + bytes_written;
if (end_of_write > cifsi->server_eof)
cifsi->server_eof = end_of_write;
}
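/*
* Write write_size bytes from write_data at *offset using the dialect's
* sync_write op, reopening an invalidated handle (without flushing) and
* retrying on -EAGAIN; updates the cached server EOF and i_size as bytes
* are written.
*/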
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
size_t write_size, loff_t *offset)
{
int rc = 0;
unsigned int bytes_written = 0;
unsigned int total_written;
struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
unsigned int xid;
struct dentry *dentry = open_file->dentry;
struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
struct cifs_io_parms io_parms = {0};
cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
write_size, *offset, dentry);
tcon = tlink_tcon(open_file->tlink);
server = tcon->ses->server;
if (!server->ops->sync_write)
return -ENOSYS;
xid = get_xid();
for (total_written = 0; write_size > total_written;
total_written += bytes_written) {
rc = -EAGAIN;
while (rc == -EAGAIN) {
struct kvec iov[2];
unsigned int len;
if (open_file->invalidHandle) {
/* we could deadlock if we called
filemap_fdatawait from here so tell
reopen_file not to flush data to
server now */
rc = cifs_reopen_file(open_file, false);
if (rc != 0)
break;
}
len = min(server->ops->wp_retry_size(d_inode(dentry)),
(unsigned int)write_size - total_written);
/* iov[0] is reserved for smb header */
iov[1].iov_base = (char *)write_data + total_written;
iov[1].iov_len = len;
io_parms.pid = pid;
io_parms.tcon = tcon;
io_parms.offset = *offset;
io_parms.length = len;
rc = server->ops->sync_write(xid, &open_file->fid,
&io_parms, &bytes_written, iov, 1);
}
if (rc || (bytes_written == 0)) {
if (total_written)
break;
else {
free_xid(xid);
return rc;
}
} else {
spin_lock(&d_inode(dentry)->i_lock);
cifs_update_eof(cifsi, *offset, bytes_written);
spin_unlock(&d_inode(dentry)->i_lock);
*offset += bytes_written;
}
}
cifs_stats_bytes_written(tcon, total_written);
if (total_written > 0) {
spin_lock(&d_inode(dentry)->i_lock);
if (*offset > d_inode(dentry)->i_size) {
i_size_write(d_inode(dentry), *offset);
d_inode(dentry)->i_blocks = (512 - 1 + *offset) >> 9;
}
spin_unlock(&d_inode(dentry)->i_lock);
}
mark_inode_dirty_sync(d_inode(dentry));
free_xid(xid);
return total_written;
}
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
bool fsuid_only)
{
struct cifsFileInfo *open_file = NULL;
struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
/* only filter by fsuid on multiuser mounts */
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
fsuid_only = false;
cifs: use cifsInodeInfo->open_file_lock while iterating to avoid a panic Commit 487317c99477 ("cifs: add spinlock for the openFileList to cifsInodeInfo") added cifsInodeInfo->open_file_lock spin_lock to protect the openFileList, but missed a few places where cifs_inode->openFileList was enumerated. Change these remaining tcon->open_file_lock to cifsInodeInfo->open_file_lock to avoid panic in is_size_safe_to_change. [17313.245641] RIP: 0010:is_size_safe_to_change+0x57/0xb0 [cifs] [17313.245645] Code: 68 40 48 89 ef e8 19 67 b7 f1 48 8b 43 40 48 8d 4b 40 48 8d 50 f0 48 39 c1 75 0f eb 47 48 8b 42 10 48 8d 50 f0 48 39 c1 74 3a <8b> 80 88 00 00 00 83 c0 01 a8 02 74 e6 48 89 ef c6 07 00 0f 1f 40 [17313.245649] RSP: 0018:ffff94ae1baefa30 EFLAGS: 00010202 [17313.245654] RAX: dead000000000100 RBX: ffff88dc72243300 RCX: ffff88dc72243340 [17313.245657] RDX: dead0000000000f0 RSI: 00000000098f7940 RDI: ffff88dd3102f040 [17313.245659] RBP: ffff88dd3102f040 R08: 0000000000000000 R09: ffff94ae1baefc40 [17313.245661] R10: ffffcdc8bb1c4e80 R11: ffffcdc8b50adb08 R12: 00000000098f7940 [17313.245663] R13: ffff88dc72243300 R14: ffff88dbc8f19600 R15: ffff88dc72243428 [17313.245667] FS: 00007fb145485700(0000) GS:ffff88dd3e000000(0000) knlGS:0000000000000000 [17313.245670] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [17313.245672] CR2: 0000026bb46c6000 CR3: 0000004edb110003 CR4: 00000000007606e0 [17313.245753] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [17313.245756] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [17313.245759] PKRU: 55555554 [17313.245761] Call Trace: [17313.245803] cifs_fattr_to_inode+0x16b/0x580 [cifs] [17313.245838] cifs_get_inode_info+0x35c/0xa60 [cifs] [17313.245852] ? kmem_cache_alloc_trace+0x151/0x1d0 [17313.245885] cifs_open+0x38f/0x990 [cifs] [17313.245921] ? cifs_revalidate_dentry_attr+0x3e/0x350 [cifs] [17313.245953] ? cifsFileInfo_get+0x30/0x30 [cifs] [17313.245960] ? do_dentry_open+0x132/0x330 [17313.245963] do_dentry_open+0x132/0x330 [17313.245969] path_openat+0x573/0x14d0 [17313.245974] do_filp_open+0x93/0x100 [17313.245979] ? __check_object_size+0xa3/0x181 [17313.245986] ? audit_alloc_name+0x7e/0xd0 [17313.245992] do_sys_open+0x184/0x220 [17313.245999] do_syscall_64+0x5b/0x1b0 Fixes: 487317c99477 ("cifs: add spinlock for the openFileList to cifsInodeInfo") CC: Stable <stable@vger.kernel.org> Signed-off-by: Dave Wysochanski <dwysocha@redhat.com> Reviewed-by: Ronnie Sahlberg <lsahlber@redhat.com> Signed-off-by: Steve French <stfrench@microsoft.com>
2019-10-03 05:16:27 +00:00
spin_lock(&cifs_inode->open_file_lock);
/* We could simply take the first list entry since write-only entries
are always at the end of the list; but the first entry might have a
close pending, so we go through the whole list */
list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
continue;
if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
if ((!open_file->invalidHandle)) {
/* found a good file */
/* lock it so it will not be closed on us */
cifsFileInfo_get(open_file);
spin_unlock(&cifs_inode->open_file_lock);
return open_file;
}
/*
 * Else might as well continue and look for another, or simply have
 * the caller reopen it again rather than trying to fix this handle.
 */
} else /* write only file */
break; /* write only files are last so must be done */
}
spin_unlock(&cifs_inode->open_file_lock);
return NULL;
}
/* Return -EBADF if no handle is found; otherwise return the general rc */
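/*
 * Scan cifs_inode->openFileList for a handle opened with write access,
 * preferring handles owned by the current thread group before falling back
 * to any available one.  An invalidated handle may be reopened as a last
 * resort (at most MAX_REOPEN_ATT times).  On success a reference is taken
 * on the returned cifsFileInfo; the caller must drop it with
 * cifsFileInfo_put().
 */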
int
cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
struct cifsFileInfo **ret_file)
{
struct cifsFileInfo *open_file, *inv_file = NULL;
struct cifs_sb_info *cifs_sb;
bool any_available = false;
int rc = -EBADF;
unsigned int refind = 0;
bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
bool with_delete = flags & FIND_WR_WITH_DELETE;
*ret_file = NULL;
/*
* Having a null inode here (because mapping->host was set to zero by
* the VFS or MM) should not happen, but we had reports of an oops (due
* to it being zero) during stress test cases, so we need to check for it
*/
if (cifs_inode == NULL) {
cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
dump_stack();
return rc;
}
cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
/* only filter by fsuid on multiuser mounts */
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
fsuid_only = false;
spin_lock(&cifs_inode->open_file_lock);
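/*
 * refind_writable is re-entered (with open_file_lock held) after each
 * attempt to reopen an invalidated handle; give up once MAX_REOPEN_ATT
 * attempts have been made.
 */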
refind_writable:
if (refind > MAX_REOPEN_ATT) {
spin_unlock(&cifs_inode->open_file_lock);
return rc;
}
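/*
 * First pass: only consider handles opened by the current thread group.
 * If none qualifies, any_available is set and the list is scanned again
 * accepting handles from any owner.
 */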
list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
if (!any_available && open_file->pid != current->tgid)
continue;
if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
continue;
if (with_delete && !(open_file->fid.access & DELETE))
continue;
if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
if (!open_file->invalidHandle) {
/* found a good writable file */
cifsFileInfo_get(open_file);
spin_unlock(&cifs_inode->open_file_lock);
*ret_file = open_file;
return 0;
} else {
if (!inv_file)
inv_file = open_file;
}
}
}
/* couldn't find a usable FH with the same pid, try any available */
if (!any_available) {
any_available = true;
goto refind_writable;
}
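/*
 * No valid handle was found in either pass.  If we saw one with an
 * invalidated handle, pin it with a reference so it can be reopened once
 * the spinlock is dropped.
 */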
if (inv_file) {
any_available = false;
cifsFileInfo_get(inv_file);
}
spin_unlock(&cifs_inode->open_file_lock);
if (inv_file) {
rc = cifs_reopen_file(inv_file, false);
if (!rc) {
*ret_file = inv_file;
return 0;
}
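/*
 * Reopen failed: move the stale handle to the tail of the list, drop our
 * reference, and retry the search under the lock.
 */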
spin_lock(&cifs_inode->open_file_lock);
list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
spin_unlock(&cifs_inode->open_file_lock);
cifsFileInfo_put(inv_file);
++refind;
inv_file = NULL;
spin_lock(&cifs_inode->open_file_lock);
goto refind_writable;
}
return rc;
}
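/*
 * Convenience wrapper around cifs_get_writable_file() that returns the
 * referenced handle directly, or NULL if none could be found or reopened.
 */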
struct cifsFileInfo *
find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
{
struct cifsFileInfo *cfile;
int rc;
rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
if (rc)
cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
return cfile;
}
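/*
 * Find an already-open handle for the file named 'name' on this tcon by
 * comparing 'name' with the path built from each open file's dentry, then
 * return a writable handle for it via cifs_get_writable_file() (honoring
 * 'flags', e.g. FIND_WR_WITH_DELETE).  Returns -ENOENT if no open file
 * matches the path.
 */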
int
cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
int flags,
struct cifsFileInfo **ret_file)
{
struct cifsFileInfo *cfile;
void *page = alloc_dentry_path();
*ret_file = NULL;
spin_lock(&tcon->open_file_lock);
list_for_each_entry(cfile, &tcon->openFileList, tlist) {
struct cifsInodeInfo *cinode;
const char *full_path = build_path_from_dentry(cfile->dentry, page);
if (IS_ERR(full_path)) {
spin_unlock(&tcon->open_file_lock);
free_dentry_path(page);
return PTR_ERR(full_path);
}
if (strcmp(full_path, name))
continue;
cinode = CIFS_I(d_inode(cfile->dentry));
spin_unlock(&tcon->open_file_lock);
free_dentry_path(page);
return cifs_get_writable_file(cinode, flags, ret_file);
}
spin_unlock(&tcon->open_file_lock);
free_dentry_path(page);
return -ENOENT;
}
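/*
 * Same path-based lookup as cifs_get_writable_path(), but hands back a
 * handle opened for reading via find_readable_file().
 */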
int
cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
struct cifsFileInfo **ret_file)
{
struct cifsFileInfo *cfile;
void *page = alloc_dentry_path();
*ret_file = NULL;
spin_lock(&tcon->open_file_lock);
list_for_each_entry(cfile, &tcon->openFileList, tlist) {
struct cifsInodeInfo *cinode;
const char *full_path = build_path_from_dentry(cfile->dentry, page);
if (IS_ERR(full_path)) {
spin_unlock(&tcon->open_file_lock);
free_dentry_path(page);
return PTR_ERR(full_path);
}
if (strcmp(full_path, name))
continue;
cinode = CIFS_I(d_inode(cfile->dentry));
spin_unlock(&tcon->open_file_lock);
free_dentry_path(page);
*ret_file = find_readable_file(cinode, 0);
return *ret_file ? 0 : -ENOENT;
}
spin_unlock(&tcon->open_file_lock);
free_dentry_path(page);
return -ENOENT;
}
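/*
 * Write the byte range [from, to) of a page back to the server through an
 * existing writable handle for the inode (partial-page writeback helper).
 */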
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
struct address_space *mapping = page->mapping;
loff_t offset = (loff_t)page->index << PAGE_SHIFT;
char *write_data;
int rc = -EFAULT;
int bytes_written = 0;
struct inode *inode;
struct cifsFileInfo *open_file;
if (!mapping || !mapping->host)
return -EFAULT;
inode = page->mapping->host;
offset += (loff_t)from;
write_data = kmap(page);
write_data += from;
if ((to > PAGE_SIZE) || (from > to)) {
kunmap(page);
return -EIO;
}
/* racing with truncate? */
if (offset > mapping->host->i_size) {
kunmap(page);
return 0; /* don't care */
}
/* check to make sure that we are not extending the file */
if (mapping->host->i_size - offset < (loff_t)to)
to = (unsigned)(mapping->host->i_size - offset);
rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY,
&open_file);
if (!rc) {
bytes_written = cifs_write(open_file, open_file->pid,
write_data, to - from, &offset);
cifsFileInfo_put(open_file);
/* Does mm or vfs already set times? */
inode->i_atime = inode->i_mtime = current_time(inode);
if ((bytes_written > 0) && (offset))
rc = 0;
else if (bytes_written < 0)
rc = bytes_written;
else
rc = -EFAULT;
} else {
cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
if (!is_retryable_error(rc))
rc = -EIO;
}
kunmap(page);
return rc;
}
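/*
 * Allocate a cifs_writedata for up to @tofind pages and fill its page
 * array with dirty pages tagged in @mapping, starting at *@index and not
 * going past @end.  The number of pages actually found is returned in
 * *@found_pages.
 */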
static struct cifs_writedata *
wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
pgoff_t end, pgoff_t *index,
unsigned int *found_pages)
{
struct cifs_writedata *wdata;
wdata = cifs_writedata_alloc((unsigned int)tofind,
cifs_writev_complete);
if (!wdata)
return NULL;
*found_pages = find_get_pages_range_tag(mapping, index, end,
PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
return wdata;
}
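/*
 * Lock and validate the pages gathered by wdata_alloc_and_fillpages(),
 * keeping only a consecutive run that still belongs to @mapping, is still
 * dirty and falls inside the writeback range.  Kept pages are marked for
 * writeback; the rest are released.  Returns the number of pages to send.
 */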
static unsigned int
wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
struct address_space *mapping,
struct writeback_control *wbc,
pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
{
unsigned int nr_pages = 0, i;
struct page *page;
for (i = 0; i < found_pages; i++) {
page = wdata->pages[i];
/*
* At this point we hold neither the i_pages lock nor the
* page lock: the page may be truncated or invalidated
* (changing page->mapping to NULL), or even swizzled
* back from swapper_space to tmpfs file mapping
*/
if (nr_pages == 0)
lock_page(page);
else if (!trylock_page(page))
break;
if (unlikely(page->mapping != mapping)) {
unlock_page(page);
break;
}
if (!wbc->range_cyclic && page->index > end) {
*done = true;
unlock_page(page);
break;
}
if (*next && (page->index != *next)) {
/* Not next consecutive page */
unlock_page(page);
break;
}
if (wbc->sync_mode != WB_SYNC_NONE)
wait_on_page_writeback(page);
if (PageWriteback(page) ||
!clear_page_dirty_for_io(page)) {
unlock_page(page);
break;
}
/*
* This actually clears the dirty bit in the radix tree.
* See cifs_writepage() for more commentary.
*/
set_page_writeback(page);
if (page_offset(page) >= i_size_read(mapping->host)) {
*done = true;
unlock_page(page);
end_page_writeback(page);
break;
}
wdata->pages[i] = page;
*next = page->index + 1;
++nr_pages;
}
/* reset index to refind any pages skipped */
if (nr_pages == 0)
*index = wdata->pages[0]->index + 1;
/* put any pages we aren't going to use */
for (i = nr_pages; i < found_pages; i++) {
put_page(wdata->pages[i]);
wdata->pages[i] = NULL;
}
return nr_pages;
}
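/*
 * Finish setting up @wdata (offset, page and tail sizes, byte count, pid)
 * for the @nr_pages it carries, adjust the reserved credits to the actual
 * byte count and hand the request to the server's async write operation.
 * Returns -EAGAIN if the cached file handle has been invalidated.
 */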
static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
struct address_space *mapping, struct writeback_control *wbc)
{
int rc;
wdata->sync_mode = wbc->sync_mode;
wdata->nr_pages = nr_pages;
wdata->offset = page_offset(wdata->pages[0]);
wdata->pagesz = PAGE_SIZE;
wdata->tailsz = min(i_size_read(mapping->host) -
page_offset(wdata->pages[nr_pages - 1]),
(loff_t)PAGE_SIZE);
wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
wdata->pid = wdata->cfile->pid;
rc = adjust_credits(wdata->server, &wdata->credits, wdata->bytes);
if (rc)
return rc;
if (wdata->cfile->invalidHandle)
rc = -EAGAIN;
else
rc = wdata->server->ops->async_writev(wdata,
cifs_writedata_release);
return rc;
}
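/*
 * Write out dirty pages for @mapping: gather consecutive dirty pages into
 * wsize-sized cifs_writedata requests and send them asynchronously,
 * redirtying or error-flagging pages if a send fails.
 */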
static int cifs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct TCP_Server_Info *server;
bool done = false, scanned = false, range_whole = false;
pgoff_t end, index;
struct cifs_writedata *wdata;
struct cifsFileInfo *cfile = NULL;
int rc = 0;
int saved_rc = 0;
unsigned int xid;
/*
* If wsize is smaller than the page cache size, default to writing
* one page at a time via cifs_writepage
*/
if (cifs_sb->ctx->wsize < PAGE_SIZE)
return generic_writepages(mapping, wbc);
xid = get_xid();
if (wbc->range_cyclic) {
index = mapping->writeback_index; /* Start from prev offset */
end = -1;
} else {
index = wbc->range_start >> PAGE_SHIFT;
end = wbc->range_end >> PAGE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = true;
scanned = true;
}
server = cifs_pick_channel(cifs_sb_master_tcon(cifs_sb)->ses);
retry:
while (!done && index <= end) {
unsigned int i, nr_pages, found_pages, wsize;
pgoff_t next = 0, tofind, saved_index = index;
struct cifs_credits credits_on_stack;
struct cifs_credits *credits = &credits_on_stack;
int get_file_rc = 0;
if (cfile)
cifsFileInfo_put(cfile);
rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
/* in case of an error store it to return later */
if (rc)
get_file_rc = rc;
rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
&wsize, credits);
if (rc != 0) {
done = true;
break;
}
tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
&found_pages);
if (!wdata) {
rc = -ENOMEM;
done = true;
add_credits_and_wake_if(server, credits, 0);
break;
}
if (found_pages == 0) {
kref_put(&wdata->refcount, cifs_writedata_release);
add_credits_and_wake_if(server, credits, 0);
break;
}
nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
end, &index, &next, &done);
/* nothing to write? */
if (nr_pages == 0) {
kref_put(&wdata->refcount, cifs_writedata_release);
add_credits_and_wake_if(server, credits, 0);
continue;
}
wdata->credits = credits_on_stack;
wdata->cfile = cfile;
wdata->server = server;
cfile = NULL;
if (!wdata->cfile) {
cifs_dbg(VFS, "No writable handle in writepages rc=%d\n",
get_file_rc);
if (is_retryable_error(get_file_rc))
rc = get_file_rc;
else
rc = -EBADF;
} else
rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
for (i = 0; i < nr_pages; ++i)
unlock_page(wdata->pages[i]);
/* send failure -- clean up the mess */
if (rc != 0) {
add_credits_and_wake_if(server, &wdata->credits, 0);
for (i = 0; i < nr_pages; ++i) {
if (is_retryable_error(rc))
redirty_page_for_writepage(wbc,
wdata->pages[i]);
else
SetPageError(wdata->pages[i]);
end_page_writeback(wdata->pages[i]);
put_page(wdata->pages[i]);
}
if (!is_retryable_error(rc))
mapping_set_error(mapping, rc);
}
kref_put(&wdata->refcount, cifs_writedata_release);
if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
index = saved_index;
continue;
}
/* Return immediately if we received a signal during writing */
if (is_interrupt_error(rc)) {
done = true;
break;
}
if (rc != 0 && saved_rc == 0)
saved_rc = rc;
wbc->nr_to_write -= nr_pages;
if (wbc->nr_to_write <= 0)
done = true;
index = next;
}
if (!scanned && !done) {
/*
* We hit the last page and there is more work to be done: wrap
* back to the start of the file
*/
scanned = true;
index = 0;
goto retry;
}
if (saved_rc != 0)
rc = saved_rc;
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = index;
if (cfile)
cifsFileInfo_put(cfile);
free_xid(xid);
/* Indication to update ctime and mtime as close is deferred */
set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
return rc;
}
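/*
 * Write out a single locked page, retrying on -EAGAIN when called for
 * data integrity (WB_SYNC_ALL) and redirtying the page on other retryable
 * errors.
 */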
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
int rc;
unsigned int xid;
xid = get_xid();
/* BB add check for wbc flags */
get_page(page);
if (!PageUptodate(page))
cifs_dbg(FYI, "ppw - page not up to date\n");
/*
* Set the "writeback" flag, and clear "dirty" in the radix tree.
*
* A writepage() implementation always needs to do either this,
* or re-dirty the page with "redirty_page_for_writepage()" in
* the case of a failure.
*
* Just unlocking the page will cause the radix tree tag-bits
* to fail to update with the state of the page correctly.
*/
set_page_writeback(page);
retry_write:
rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
if (is_retryable_error(rc)) {
if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
goto retry_write;
redirty_page_for_writepage(wbc, page);
} else if (rc != 0) {
SetPageError(page);
mapping_set_error(page->mapping, rc);
} else {
SetPageUptodate(page);
}
end_page_writeback(page);
put_page(page);
free_xid(xid);
return rc;
}
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
int rc = cifs_writepage_locked(page, wbc);
unlock_page(page);
return rc;
}
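/*
 * ->write_end() for cifs: mark the page up to date where possible, or, if
 * it never was, push the copied bytes to the server through the open
 * handle; extend i_size if the write went past it, then unlock and release
 * the page.
 */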
static int cifs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
int rc;
struct inode *inode = mapping->host;
struct cifsFileInfo *cfile = file->private_data;
struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
__u32 pid;
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = cfile->pid;
else
pid = current->tgid;
cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
page, pos, copied);
if (PageChecked(page)) {
if (copied == len)
SetPageUptodate(page);
ClearPageChecked(page);
} else if (!PageUptodate(page) && copied == PAGE_SIZE)
SetPageUptodate(page);
if (!PageUptodate(page)) {
char *page_data;
unsigned offset = pos & (PAGE_SIZE - 1);
unsigned int xid;
xid = get_xid();
/* this is probably better than directly calling
partialpage_write since in this function the file handle is
known which we might as well leverage */
/* BB check if anything else missing out of ppw
such as updating last write time */
page_data = kmap(page);
rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
/* if (rc < 0) should we set writebehind rc? */
kunmap(page);
free_xid(xid);
} else {
rc = copied;
pos += copied;
set_page_dirty(page);
}
if (rc > 0) {
spin_lock(&inode->i_lock);
if (pos > inode->i_size) {
i_size_write(inode, pos);
inode->i_blocks = (512 - 1 + pos) >> 9;
}
spin_unlock(&inode->i_lock);
}
unlock_page(page);
put_page(page);
/* Indication to update ctime and mtime as close is deferred */
set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
return rc;
}
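/*
 * Strict-cache fsync: flush and wait for dirty pages, invalidate the
 * local page cache if we no longer hold a read lease/oplock, then ask
 * the server to flush its copy of the file.
 */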
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
int datasync)
{
unsigned int xid;
int rc = 0;
struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
struct cifsFileInfo *smbfile = file->private_data;
struct inode *inode = file_inode(file);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
rc = file_write_and_wait_range(file, start, end);
if (rc) {
trace_cifs_fsync_err(inode->i_ino, rc);
return rc;
}
xid = get_xid();
cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
file, datasync);
if (!CIFS_CACHE_READ(CIFS_I(inode))) {
rc = cifs_zap_mapping(inode);
if (rc) {
cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
rc = 0; /* don't care about it in fsync */
}
}
tcon = tlink_tcon(smbfile->tlink);
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
server = tcon->ses->server;
if (server->ops->flush)
rc = server->ops->flush(xid, tcon, &smbfile->fid);
else
rc = -ENOSYS;
}
free_xid(xid);
return rc;
}
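/*
 * fsync variant that never invalidates the local page cache: flush and
 * wait for dirty pages, then ask the server to flush its copy of the
 * file.
 */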
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
unsigned int xid;
int rc = 0;
struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
struct cifsFileInfo *smbfile = file->private_data;
struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
rc = file_write_and_wait_range(file, start, end);
if (rc) {
trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
return rc;
}
xid = get_xid();
cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
file, datasync);
tcon = tlink_tcon(smbfile->tlink);
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
server = tcon->ses->server;
if (server->ops->flush)
rc = server->ops->flush(xid, tcon, &smbfile->fid);
else
rc = -ENOSYS;
}
free_xid(xid);
return rc;
}
/*
 * As the file closes, flush all cached write data for this inode,
 * checking for write-behind errors.
*/
int cifs_flush(struct file *file, fl_owner_t id)
{
struct inode *inode = file_inode(file);
int rc = 0;
if (file->f_mode & FMODE_WRITE)
rc = filemap_write_and_wait(inode->i_mapping);
cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
if (rc)
trace_cifs_flush_err(inode->i_ino, rc);
return rc;
}
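/*
 * Allocate num_pages pages for an uncached write.  If an allocation
 * fails, release the pages allocated so far and return -ENOMEM.
 */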
static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
int rc = 0;
unsigned long i;
for (i = 0; i < num_pages; i++) {
pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
if (!pages[i]) {
/*
* save number of pages we have already allocated and
* return with ENOMEM error
*/
num_pages = i;
rc = -ENOMEM;
break;
}
}
if (rc) {
for (i = 0; i < num_pages; i++)
put_page(pages[i]);
}
return rc;
}
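/*
 * Clamp the next chunk of an uncached write to the negotiated wsize and
 * return the number of pages needed to hold it; the clamped length is
 * optionally returned through cur_len.
 */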
static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
size_t num_pages;
size_t clen;
clen = min_t(const size_t, len, wsize);
num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
if (cur_len)
*cur_len = clen;
return num_pages;
}
static void
cifs_uncached_writedata_release(struct kref *refcount)
{
int i;
struct cifs_writedata *wdata = container_of(refcount,
struct cifs_writedata, refcount);
kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
for (i = 0; i < wdata->nr_pages; i++)
put_page(wdata->pages[i]);
cifs_writedata_release(refcount);
}
static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);
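/*
 * Completion work for an uncached write: advance the server EOF and
 * i_size if the write extended the file, signal the waiter and let the
 * aio context collect the result.
 */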
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
struct cifs_writedata *wdata = container_of(work,
struct cifs_writedata, work);
struct inode *inode = d_inode(wdata->cfile->dentry);
struct cifsInodeInfo *cifsi = CIFS_I(inode);
spin_lock(&inode->i_lock);
cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
if (cifsi->server_eof > inode->i_size)
i_size_write(inode, cifsi->server_eof);
spin_unlock(&inode->i_lock);
complete(&wdata->done);
collect_uncached_write_data(wdata->ctx);
/* the below call can possibly free the last ref to aio ctx */
kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
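/*
 * Copy up to *len bytes from the source iterator into the pages attached
 * to wdata.  On return *len is the number of bytes actually copied and
 * *num_pages the number of pages used; -EFAULT means nothing was copied.
 */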
static int
wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
size_t *len, unsigned long *num_pages)
{
size_t save_len, copied, bytes, cur_len = *len;
unsigned long i, nr_pages = *num_pages;
save_len = cur_len;
for (i = 0; i < nr_pages; i++) {
bytes = min_t(const size_t, cur_len, PAGE_SIZE);
copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
cur_len -= copied;
/*
* If we didn't copy as much as we expected, then that
* may mean we trod into an unmapped area. Stop copying
* at that point. On the next pass through the big
* loop, we'll likely end up getting a zero-length
* write and bailing out of it.
*/
if (copied < bytes)
break;
}
cur_len = save_len - cur_len;
*len = cur_len;
/*
* If we have no data to send, then that probably means that
* the copy above failed altogether. That's most likely because
* the address in the iovec was bogus. Return -EFAULT and let
* the caller free anything we allocated and bail out.
*/
if (!cur_len)
return -EFAULT;
/*
* i + 1 now represents the number of pages we actually used in
* the copy phase above.
*/
*num_pages = i + 1;
return 0;
}
static int
cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
struct cifs_aio_ctx *ctx)
{
unsigned int wsize;
struct cifs_credits credits;
int rc;
struct TCP_Server_Info *server = wdata->server;
do {
if (wdata->cfile->invalidHandle) {
rc = cifs_reopen_file(wdata->cfile, false);
if (rc == -EAGAIN)
continue;
else if (rc)
break;
}
/*
* Wait for credits to resend this wdata.
 * Note: we are attempting to resend the whole wdata, not in
 * segments.
*/
do {
rc = server->ops->wait_mtu_credits(server, wdata->bytes,
&wsize, &credits);
if (rc)
goto fail;
if (wsize < wdata->bytes) {
add_credits_and_wake_if(server, &credits, 0);
msleep(1000);
}
} while (wsize < wdata->bytes);
wdata->credits = credits;
rc = adjust_credits(server, &wdata->credits, wdata->bytes);
if (!rc) {
if (wdata->cfile->invalidHandle)
rc = -EAGAIN;
else {
#ifdef CONFIG_CIFS_SMB_DIRECT
if (wdata->mr) {
wdata->mr->need_invalidate = true;
smbd_deregister_mr(wdata->mr);
wdata->mr = NULL;
}
#endif
rc = server->ops->async_writev(wdata,
cifs_uncached_writedata_release);
}
}
/* If the write was successfully sent, we are done */
if (!rc) {
list_add_tail(&wdata->list, wdata_list);
return 0;
}
/* Roll back credits and retry if needed */
add_credits_and_wake_if(server, &wdata->credits, 0);
} while (rc == -EAGAIN);
fail:
kref_put(&wdata->refcount, cifs_uncached_writedata_release);
return rc;
}
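/*
 * Split an uncached write into wsize-sized chunks.  For each chunk either
 * pin the user pages (direct I/O) or copy the data into freshly allocated
 * pages, then queue an async write and add the wdata to wdata_list for
 * the caller to collect.
 */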
static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
struct cifsFileInfo *open_file,
struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
struct cifs_aio_ctx *ctx)
{
int rc = 0;
size_t cur_len;
unsigned long nr_pages, num_pages, i;
struct cifs_writedata *wdata;
struct iov_iter saved_from = *from;
loff_t saved_offset = offset;
pid_t pid;
struct TCP_Server_Info *server;
struct page **pagevec;
size_t start;
unsigned int xid;
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid;
else
pid = current->tgid;
server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
xid = get_xid();
do {
unsigned int wsize;
struct cifs_credits credits_on_stack;
struct cifs_credits *credits = &credits_on_stack;
if (open_file->invalidHandle) {
rc = cifs_reopen_file(open_file, false);
if (rc == -EAGAIN)
continue;
else if (rc)
break;
}
rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
&wsize, credits);
if (rc)
break;
cur_len = min_t(const size_t, len, wsize);
if (ctx->direct_io) {
ssize_t result;
result = iov_iter_get_pages_alloc(
from, &pagevec, cur_len, &start);
if (result < 0) {
cifs_dbg(VFS,
"direct_writev couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
result, iov_iter_type(from),
from->iov_offset, from->count);
dump_stack();
rc = result;
add_credits_and_wake_if(server, credits, 0);
break;
}
cur_len = (size_t)result;
iov_iter_advance(from, cur_len);
nr_pages =
(cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;
wdata = cifs_writedata_direct_alloc(pagevec,
cifs_uncached_writev_complete);
if (!wdata) {
rc = -ENOMEM;
add_credits_and_wake_if(server, credits, 0);
break;
}
wdata->page_offset = start;
wdata->tailsz =
nr_pages > 1 ?
cur_len - (PAGE_SIZE - start) -
(nr_pages - 2) * PAGE_SIZE :
cur_len;
} else {
nr_pages = get_numpages(wsize, len, &cur_len);
wdata = cifs_writedata_alloc(nr_pages,
cifs_uncached_writev_complete);
if (!wdata) {
rc = -ENOMEM;
add_credits_and_wake_if(server, credits, 0);
break;
}
rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
if (rc) {
kvfree(wdata->pages);
kfree(wdata);
add_credits_and_wake_if(server, credits, 0);
break;
}
num_pages = nr_pages;
rc = wdata_fill_from_iovec(
wdata, from, &cur_len, &num_pages);
if (rc) {
for (i = 0; i < nr_pages; i++)
put_page(wdata->pages[i]);
kvfree(wdata->pages);
kfree(wdata);
add_credits_and_wake_if(server, credits, 0);
break;
}
/*
* Bring nr_pages down to the number of pages we
* actually used, and free any pages that we didn't use.
*/
for ( ; nr_pages > num_pages; nr_pages--)
put_page(wdata->pages[nr_pages - 1]);
wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
}
wdata->sync_mode = WB_SYNC_ALL;
wdata->nr_pages = nr_pages;
wdata->offset = (__u64)offset;
wdata->cfile = cifsFileInfo_get(open_file);
wdata->server = server;
wdata->pid = pid;
wdata->bytes = cur_len;
wdata->pagesz = PAGE_SIZE;
wdata->credits = credits_on_stack;
wdata->ctx = ctx;
kref_get(&ctx->refcount);
rc = adjust_credits(server, &wdata->credits, wdata->bytes);
if (!rc) {
if (wdata->cfile->invalidHandle)
rc = -EAGAIN;
else
rc = server->ops->async_writev(wdata,
cifs_uncached_writedata_release);
}
if (rc) {
add_credits_and_wake_if(server, &wdata->credits, 0);
kref_put(&wdata->refcount,
cifs_uncached_writedata_release);
if (rc == -EAGAIN) {
*from = saved_from;
iov_iter_advance(from, offset - saved_offset);
continue;
}
break;
}
list_add_tail(&wdata->list, wdata_list);
offset += cur_len;
len -= cur_len;
} while (len > 0);
free_xid(xid);
return rc;
}
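/*
 * Walk the issued write chunks for this aio context in order of
 * increasing offset, accumulating the bytes written, resending chunks
 * that failed with -EAGAIN, and completing the context once all replies
 * have arrived.
 */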
static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
{
struct cifs_writedata *wdata, *tmp;
struct cifs_tcon *tcon;
struct cifs_sb_info *cifs_sb;
struct dentry *dentry = ctx->cfile->dentry;
ssize_t rc;
tcon = tlink_tcon(ctx->cfile->tlink);
cifs_sb = CIFS_SB(dentry->d_sb);
mutex_lock(&ctx->aio_mutex);
if (list_empty(&ctx->list)) {
mutex_unlock(&ctx->aio_mutex);
return;
}
rc = ctx->rc;
/*
* Wait for and collect replies for any successful sends in order of
* increasing offset. Once an error is hit, then return without waiting
* for any more replies.
*/
restart_loop:
list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
if (!rc) {
if (!try_wait_for_completion(&wdata->done)) {
mutex_unlock(&ctx->aio_mutex);
return;
}
if (wdata->result)
rc = wdata->result;
else
ctx->total_len += wdata->bytes;
/* resend call if it's a retryable error */
if (rc == -EAGAIN) {
struct list_head tmp_list;
struct iov_iter tmp_from = ctx->iter;
INIT_LIST_HEAD(&tmp_list);
list_del_init(&wdata->list);
if (ctx->direct_io)
rc = cifs_resend_wdata(
wdata, &tmp_list, ctx);
else {
iov_iter_advance(&tmp_from,
wdata->offset - ctx->pos);
rc = cifs_write_from_iter(wdata->offset,
wdata->bytes, &tmp_from,
ctx->cfile, cifs_sb, &tmp_list,
ctx);
kref_put(&wdata->refcount,
cifs_uncached_writedata_release);
}
list_splice(&tmp_list, &ctx->list);
goto restart_loop;
}
}
list_del_init(&wdata->list);
kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
cifs_stats_bytes_written(tcon, ctx->total_len);
set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
ctx->rc = (rc == 0) ? ctx->total_len : rc;
mutex_unlock(&ctx->aio_mutex);
if (ctx->iocb && ctx->iocb->ki_complete)
ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
else
complete(&ctx->done);
}
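/*
 * Common back end for cifs_user_writev() and cifs_direct_writev(): set up
 * an aio context, issue the uncached writes and either return
 * -EIOCBQUEUED for async iocbs or wait for completion for synchronous
 * ones.
 */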
static ssize_t __cifs_writev(
struct kiocb *iocb, struct iov_iter *from, bool direct)
{
struct file *file = iocb->ki_filp;
ssize_t total_written = 0;
struct cifsFileInfo *cfile;
struct cifs_tcon *tcon;
struct cifs_sb_info *cifs_sb;
struct cifs_aio_ctx *ctx;
struct iov_iter saved_from = *from;
size_t len = iov_iter_count(from);
int rc;
/*
* iov_iter_get_pages_alloc doesn't work with ITER_KVEC.
 * In this case, fall back to the non-direct write function.
 * This could be improved by getting pages directly in ITER_KVEC.
*/
if (direct && iov_iter_is_kvec(from)) {
cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
direct = false;
}
rc = generic_write_checks(iocb, from);
if (rc <= 0)
return rc;
cifs_sb = CIFS_FILE_SB(file);
cfile = file->private_data;
tcon = tlink_tcon(cfile->tlink);
if (!tcon->ses->server->ops->async_writev)
return -ENOSYS;
ctx = cifs_aio_ctx_alloc();
if (!ctx)
return -ENOMEM;
ctx->cfile = cifsFileInfo_get(cfile);
if (!is_sync_kiocb(iocb))
ctx->iocb = iocb;
ctx->pos = iocb->ki_pos;
if (direct) {
ctx->direct_io = true;
ctx->iter = *from;
ctx->len = len;
} else {
rc = setup_aio_ctx_iter(ctx, from, WRITE);
if (rc) {
kref_put(&ctx->refcount, cifs_aio_ctx_release);
return rc;
}
}
/* grab a lock here because the response handlers can access ctx */
mutex_lock(&ctx->aio_mutex);
rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
cfile, cifs_sb, &ctx->list, ctx);
/*
* If at least one write was successfully sent, then discard any rc
* value from the later writes. If the other write succeeds, then
* we'll end up returning whatever was written. If it fails, then
* we'll get a new rc value from that.
*/
if (!list_empty(&ctx->list))
rc = 0;
mutex_unlock(&ctx->aio_mutex);
if (rc) {
kref_put(&ctx->refcount, cifs_aio_ctx_release);
return rc;
}
if (!is_sync_kiocb(iocb)) {
kref_put(&ctx->refcount, cifs_aio_ctx_release);
return -EIOCBQUEUED;
}
rc = wait_for_completion_killable(&ctx->done);
if (rc) {
mutex_lock(&ctx->aio_mutex);
ctx->rc = rc = -EINTR;
total_written = ctx->total_len;
mutex_unlock(&ctx->aio_mutex);
} else {
rc = ctx->rc;
total_written = ctx->total_len;
}
kref_put(&ctx->refcount, cifs_aio_ctx_release);
if (unlikely(!total_written))
return rc;
iocb->ki_pos += total_written;
return total_written;
}
ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
{
return __cifs_writev(iocb, from, true);
}
ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
return __cifs_writev(iocb, from, false);
}
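/*
 * Cached, locked write path: with the inode lock and lock_sem held,
 * refuse the write if it conflicts with a mandatory byte-range lock,
 * otherwise go through the generic page-cache write path and sync the
 * result once the locks are dropped.
 */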
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
struct inode *inode = file->f_mapping->host;
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
ssize_t rc;
inode_lock(inode);
/*
 * We need to hold the sem to be sure nobody modifies the lock list
 * with a brlock that prevents writing.
*/
down_read(&cinode->lock_sem);
rc = generic_write_checks(iocb, from);
if (rc <= 0)
goto out;
if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
server->vals->exclusive_lock_type, 0,
NULL, CIFS_WRITE_OP))
rc = __generic_file_write_iter(iocb, from);
else
rc = -EACCES;
out:
up_read(&cinode->lock_sem);
inode_unlock(inode);
if (rc > 0)
rc = generic_write_sync(iocb, rc);
return rc;
}
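/*
 * Strict-cache write entry point: if we hold a write oplock/lease the
 * data can safely go through the page cache, otherwise write it straight
 * to the server and zap any read-cached pages the write has made stale.
 */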
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
struct inode *inode = file_inode(iocb->ki_filp);
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsFileInfo *cfile = (struct cifsFileInfo *)
iocb->ki_filp->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
ssize_t written;
written = cifs_get_writer(cinode);
if (written)
return written;
if (CIFS_CACHE_WRITE(cinode)) {
if (cap_unix(tcon->ses) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
&& ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
written = generic_file_write_iter(iocb, from);
goto out;
}
written = cifs_writev(iocb, from);
goto out;
}
/*
* For non-oplocked files in strict cache mode we need to write the data
 * to the server exactly from pos to pos+len-1 rather than flush all
 * affected pages because it may cause an error with mandatory locks on
 * these pages but not on the region from pos to pos+len-1.
*/
written = cifs_user_writev(iocb, from);
if (CIFS_CACHE_READ(cinode)) {
/*
 * We have read-level caching and we have just sent a write
 * request to the server, thus making the data in the cache stale.
* Zap the cache and set oplock/lease level to NONE to avoid
* reading stale data from the cache. All subsequent read
* operations will read new data from the server.
*/
cifs_zap_mapping(inode);
cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
inode);
cinode->oplock = 0;
}
out:
cifs_put_writer(cinode);
return written;
}
static struct cifs_readdata *
cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
{
struct cifs_readdata *rdata;
rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
if (rdata != NULL) {
rdata->pages = pages;
kref_init(&rdata->refcount);
INIT_LIST_HEAD(&rdata->list);
init_completion(&rdata->done);
INIT_WORK(&rdata->work, complete);
}
return rdata;
}
static struct cifs_readdata *
cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
{
struct page **pages =
kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
struct cifs_readdata *ret = NULL;
if (pages) {
ret = cifs_readdata_direct_alloc(pages, complete);
if (!ret)
kfree(pages);
}
return ret;
}
void
cifs_readdata_release(struct kref *refcount)
{
struct cifs_readdata *rdata = container_of(refcount,
struct cifs_readdata, refcount);
#ifdef CONFIG_CIFS_SMB_DIRECT
if (rdata->mr) {
smbd_deregister_mr(rdata->mr);
rdata->mr = NULL;
}
#endif
if (rdata->cfile)
cifsFileInfo_put(rdata->cfile);
kvfree(rdata->pages);
kfree(rdata);
}
static int
cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
{
int rc = 0;
struct page *page;
unsigned int i;
for (i = 0; i < nr_pages; i++) {
page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
if (!page) {
rc = -ENOMEM;
break;
}
rdata->pages[i] = page;
}
if (rc) {
unsigned int nr_page_failed = i;
for (i = 0; i < nr_page_failed; i++) {
put_page(rdata->pages[i]);
rdata->pages[i] = NULL;
}
}
return rc;
}
static void
cifs_uncached_readdata_release(struct kref *refcount)
{
struct cifs_readdata *rdata = container_of(refcount,
struct cifs_readdata, refcount);
unsigned int i;
kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
for (i = 0; i < rdata->nr_pages; i++) {
put_page(rdata->pages[i]);
}
cifs_readdata_release(refcount);
}
/**
* cifs_readdata_to_iov - copy data from pages in response to an iovec
* @rdata: the readdata response with list of pages holding data
* @iter: destination for our data
*
* This function copies data from a list of pages in a readdata response into
* an array of iovecs. It will first calculate where the data should go
* based on the info in the readdata and then copy the data into that spot.
*/
static int
cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
{
size_t remaining = rdata->got_bytes;
unsigned int i;
for (i = 0; i < rdata->nr_pages; i++) {
struct page *page = rdata->pages[i];
size_t copy = min_t(size_t, remaining, PAGE_SIZE);
size_t written;
if (unlikely(iov_iter_is_pipe(iter))) {
void *addr = kmap_atomic(page);
written = copy_to_iter(addr, copy, iter);
kunmap_atomic(addr);
} else
written = copy_page_to_iter(page, 0, copy, iter);
remaining -= written;
if (written < copy && iov_iter_count(iter) > 0)
break;
}
return remaining ? -EFAULT : 0;
}
static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
static void
cifs_uncached_readv_complete(struct work_struct *work)
{
struct cifs_readdata *rdata = container_of(work,
struct cifs_readdata, work);
complete(&rdata->done);
collect_uncached_read_data(rdata->ctx);
/* the below call can possibly free the last ref to aio ctx */
kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}
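/*
 * Fill the pages of an uncached read either from the supplied iterator
 * or directly from the socket (or leave them to RDMA for smbdirect).
 * Pages beyond the received length are released.  Returns the number of
 * bytes placed in the pages or a negative error.
 */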
static int
uncached_fill_pages(struct TCP_Server_Info *server,
struct cifs_readdata *rdata, struct iov_iter *iter,
unsigned int len)
{
int result = 0;
unsigned int i;
unsigned int nr_pages = rdata->nr_pages;
unsigned int page_offset = rdata->page_offset;
rdata->got_bytes = 0;
rdata->tailsz = PAGE_SIZE;
for (i = 0; i < nr_pages; i++) {
struct page *page = rdata->pages[i];
size_t n;
unsigned int segment_size = rdata->pagesz;
if (i == 0)
segment_size -= page_offset;
else
page_offset = 0;
if (len <= 0) {
/* no need to hold page hostage */
rdata->pages[i] = NULL;
rdata->nr_pages--;
put_page(page);
continue;
}
n = len;
if (len >= segment_size)
/* enough data to fill the page */
n = segment_size;
else
rdata->tailsz = len;
len -= n;
if (iter)
result = copy_page_from_iter(
page, page_offset, n, iter);
#ifdef CONFIG_CIFS_SMB_DIRECT
else if (rdata->mr)
result = n;
#endif
else
result = cifs_read_page_from_socket(
server, page, page_offset, n);
if (result < 0)
break;
rdata->got_bytes += result;
}
return rdata->got_bytes > 0 && result != -ECONNABORTED ?
rdata->got_bytes : result;
}
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
struct cifs_readdata *rdata, unsigned int len)
{
return uncached_fill_pages(server, rdata, NULL, len);
}
static int
cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
struct cifs_readdata *rdata,
struct iov_iter *iter)
{
return uncached_fill_pages(server, rdata, iter, iter->count);
}
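/*
 * Resend a previously issued uncached read as a single unit, e.g. after
 * a reconnect: wait until enough credits are available for the whole
 * rdata, reissue the async read and roll the credits back on failure.
 */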
static int cifs_resend_rdata(struct cifs_readdata *rdata,
struct list_head *rdata_list,
struct cifs_aio_ctx *ctx)
{
unsigned int rsize;
struct cifs_credits credits;
int rc;
struct TCP_Server_Info *server;
/* XXX: should we pick a new channel here? */
server = rdata->server;
do {
if (rdata->cfile->invalidHandle) {
rc = cifs_reopen_file(rdata->cfile, true);
if (rc == -EAGAIN)
continue;
else if (rc)
break;
}
/*
* Wait for credits to resend this rdata.
 * Note: we are attempting to resend the whole rdata, not in
 * segments.
*/
do {
rc = server->ops->wait_mtu_credits(server, rdata->bytes,
&rsize, &credits);
if (rc)
goto fail;
if (rsize < rdata->bytes) {
add_credits_and_wake_if(server, &credits, 0);
msleep(1000);
}
} while (rsize < rdata->bytes);
rdata->credits = credits;
rc = adjust_credits(server, &rdata->credits, rdata->bytes);
if (!rc) {
if (rdata->cfile->invalidHandle)
rc = -EAGAIN;
else {
#ifdef CONFIG_CIFS_SMB_DIRECT
if (rdata->mr) {
rdata->mr->need_invalidate = true;
smbd_deregister_mr(rdata->mr);
rdata->mr = NULL;
}
#endif
rc = server->ops->async_readv(rdata);
}
}
/* If the read was successfully sent, we are done */
if (!rc) {
/* Add to aio pending list */
list_add_tail(&rdata->list, rdata_list);
return 0;
}
/* Roll back credits and retry if needed */
add_credits_and_wake_if(server, &rdata->credits, 0);
} while (rc == -EAGAIN);
fail:
kref_put(&rdata->refcount, cifs_uncached_readdata_release);
return rc;
}
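/*
 * Split an uncached read into rsize-sized chunks.  For each chunk either
 * pin the user pages (direct I/O) or allocate fresh pages, then queue an
 * async read and add the rdata to rdata_list for later collection.
 */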
static int
cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
struct cifs_aio_ctx *ctx)
{
struct cifs_readdata *rdata;
unsigned int npages, rsize;
struct cifs_credits credits_on_stack;
struct cifs_credits *credits = &credits_on_stack;
size_t cur_len;
int rc;
pid_t pid;
struct TCP_Server_Info *server;
struct page **pagevec;
size_t start;
struct iov_iter direct_iov = ctx->iter;
server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid;
else
pid = current->tgid;
if (ctx->direct_io)
iov_iter_advance(&direct_iov, offset - ctx->pos);
do {
if (open_file->invalidHandle) {
rc = cifs_reopen_file(open_file, true);
if (rc == -EAGAIN)
continue;
else if (rc)
break;
}
rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
&rsize, credits);
if (rc)
break;
cur_len = min_t(const size_t, len, rsize);
if (ctx->direct_io) {
ssize_t result;
result = iov_iter_get_pages_alloc(
&direct_iov, &pagevec,
cur_len, &start);
if (result < 0) {
cifs_dbg(VFS,
"Couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
result, iov_iter_type(&direct_iov),
direct_iov.iov_offset,
direct_iov.count);
dump_stack();
rc = result;
add_credits_and_wake_if(server, credits, 0);
break;
}
cur_len = (size_t)result;
iov_iter_advance(&direct_iov, cur_len);
rdata = cifs_readdata_direct_alloc(
pagevec, cifs_uncached_readv_complete);
if (!rdata) {
add_credits_and_wake_if(server, credits, 0);
rc = -ENOMEM;
break;
}
npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE;
rdata->page_offset = start;
rdata->tailsz = npages > 1 ?
cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE :
cur_len;
} else {
npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
/* allocate a readdata struct */
rdata = cifs_readdata_alloc(npages,
cifs_uncached_readv_complete);
if (!rdata) {
add_credits_and_wake_if(server, credits, 0);
rc = -ENOMEM;
break;
}
rc = cifs_read_allocate_pages(rdata, npages);
if (rc) {
kvfree(rdata->pages);
kfree(rdata);
add_credits_and_wake_if(server, credits, 0);
break;
}
rdata->tailsz = PAGE_SIZE;
}
rdata->server = server;
rdata->cfile = cifsFileInfo_get(open_file);
rdata->nr_pages = npages;
rdata->offset = offset;
rdata->bytes = cur_len;
rdata->pid = pid;
rdata->pagesz = PAGE_SIZE;
rdata->read_into_pages = cifs_uncached_read_into_pages;
rdata->copy_into_pages = cifs_uncached_copy_into_pages;
rdata->credits = credits_on_stack;
rdata->ctx = ctx;
kref_get(&ctx->refcount);
rc = adjust_credits(server, &rdata->credits, rdata->bytes);
if (!rc) {
if (rdata->cfile->invalidHandle)
rc = -EAGAIN;
else
rc = server->ops->async_readv(rdata);
}
if (rc) {
add_credits_and_wake_if(server, &rdata->credits, 0);
kref_put(&rdata->refcount,
cifs_uncached_readdata_release);
if (rc == -EAGAIN) {
iov_iter_revert(&direct_iov, cur_len);
continue;
}
break;
}
list_add_tail(&rdata->list, rdata_list);
offset += cur_len;
len -= cur_len;
} while (len > 0);
return rc;
}
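/*
 * Collect the results of the async reads queued on ctx->list: wait for
 * each rdata to complete, copy the data into the destination iterator
 * for the non-direct case, resend requests that returned -EAGAIN, and
 * finally report the outcome through the iocb completion (or ctx->done
 * for synchronous callers).
 */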
static void
collect_uncached_read_data(struct cifs_aio_ctx *ctx)
{
struct cifs_readdata *rdata, *tmp;
struct iov_iter *to = &ctx->iter;
struct cifs_sb_info *cifs_sb;
int rc;
cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
mutex_lock(&ctx->aio_mutex);
if (list_empty(&ctx->list)) {
mutex_unlock(&ctx->aio_mutex);
return;
}
rc = ctx->rc;
/* the loop below should proceed in the order of increasing offsets */
again:
list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
if (!rc) {
if (!try_wait_for_completion(&rdata->done)) {
mutex_unlock(&ctx->aio_mutex);
return;
}
if (rdata->result == -EAGAIN) {
/* resend call if it's a retryable error */
struct list_head tmp_list;
unsigned int got_bytes = rdata->got_bytes;
list_del_init(&rdata->list);
INIT_LIST_HEAD(&tmp_list);
/*
* Got a part of data and then reconnect has
* happened -- fill the buffer and continue
* reading.
*/
if (got_bytes && got_bytes < rdata->bytes) {
rc = 0;
if (!ctx->direct_io)
rc = cifs_readdata_to_iov(rdata, to);
if (rc) {
kref_put(&rdata->refcount,
cifs_uncached_readdata_release);
continue;
}
}
if (ctx->direct_io) {
/*
* Re-use rdata as this is a
* direct I/O
*/
rc = cifs_resend_rdata(
rdata,
&tmp_list, ctx);
} else {
rc = cifs_send_async_read(
rdata->offset + got_bytes,
rdata->bytes - got_bytes,
rdata->cfile, cifs_sb,
&tmp_list, ctx);
kref_put(&rdata->refcount,
cifs_uncached_readdata_release);
}
list_splice(&tmp_list, &ctx->list);
goto again;
} else if (rdata->result)
rc = rdata->result;
else if (!ctx->direct_io)
rc = cifs_readdata_to_iov(rdata, to);
/* if there was a short read -- discard anything left */
if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
rc = -ENODATA;
ctx->total_len += rdata->got_bytes;
}
list_del_init(&rdata->list);
kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}
if (!ctx->direct_io)
ctx->total_len = ctx->len - iov_iter_count(to);
/* mask nodata case */
if (rc == -ENODATA)
rc = 0;
ctx->rc = (rc == 0) ? (ssize_t)ctx->total_len : rc;
mutex_unlock(&ctx->aio_mutex);
if (ctx->iocb && ctx->iocb->ki_complete)
ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
else
complete(&ctx->done);
}
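/*
 * Common helper for cifs_user_readv() and cifs_direct_readv(): set up
 * a cifs_aio_ctx, send the async read requests, then either return
 * -EIOCBQUEUED for an async kiocb or wait for the reads to complete.
 */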
static ssize_t __cifs_readv(
struct kiocb *iocb, struct iov_iter *to, bool direct)
{
size_t len;
struct file *file = iocb->ki_filp;
struct cifs_sb_info *cifs_sb;
struct cifsFileInfo *cfile;
struct cifs_tcon *tcon;
ssize_t rc, total_read = 0;
loff_t offset = iocb->ki_pos;
struct cifs_aio_ctx *ctx;
/*
* iov_iter_get_pages_alloc() doesn't work with ITER_KVEC,
 * so fall back to the data copy read path.
 * This could be improved by getting pages directly for ITER_KVEC.
*/
if (direct && iov_iter_is_kvec(to)) {
cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
direct = false;
}
len = iov_iter_count(to);
if (!len)
return 0;
cifs_sb = CIFS_FILE_SB(file);
cfile = file->private_data;
tcon = tlink_tcon(cfile->tlink);
if (!tcon->ses->server->ops->async_readv)
return -ENOSYS;
if ((file->f_flags & O_ACCMODE) == O_WRONLY)
cifs_dbg(FYI, "attempting read on write only file instance\n");
ctx = cifs_aio_ctx_alloc();
if (!ctx)
return -ENOMEM;
ctx->cfile = cifsFileInfo_get(cfile);
if (!is_sync_kiocb(iocb))
ctx->iocb = iocb;
if (iter_is_iovec(to))
ctx->should_dirty = true;
if (direct) {
ctx->pos = offset;
ctx->direct_io = true;
ctx->iter = *to;
ctx->len = len;
} else {
rc = setup_aio_ctx_iter(ctx, to, READ);
if (rc) {
kref_put(&ctx->refcount, cifs_aio_ctx_release);
return rc;
}
len = ctx->len;
}
/* grab a lock here because the read response handlers can access ctx */
mutex_lock(&ctx->aio_mutex);
rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);
/* if at least one read request was sent successfully, then reset rc */
if (!list_empty(&ctx->list))
rc = 0;
mutex_unlock(&ctx->aio_mutex);
if (rc) {
kref_put(&ctx->refcount, cifs_aio_ctx_release);
return rc;
}
if (!is_sync_kiocb(iocb)) {
kref_put(&ctx->refcount, cifs_aio_ctx_release);
return -EIOCBQUEUED;
}
rc = wait_for_completion_killable(&ctx->done);
if (rc) {
mutex_lock(&ctx->aio_mutex);
ctx->rc = rc = -EINTR;
total_read = ctx->total_len;
mutex_unlock(&ctx->aio_mutex);
} else {
rc = ctx->rc;
total_read = ctx->total_len;
}
kref_put(&ctx->refcount, cifs_aio_ctx_release);
if (total_read) {
iocb->ki_pos += total_read;
return total_read;
}
return rc;
}
ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
{
return __cifs_readv(iocb, to, true);
}
ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
{
return __cifs_readv(iocb, to, false);
}
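/*
 * Strict cache mode read: only read through the page cache when it is
 * safe to do so (we hold a read cache on the inode and no conflicting
 * mandatory lock covers the range); otherwise read from the server.
 */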
ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
struct inode *inode = file_inode(iocb->ki_filp);
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsFileInfo *cfile = (struct cifsFileInfo *)
iocb->ki_filp->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
int rc = -EACCES;
/*
* In strict cache mode we need to read from the server all the time
* if we don't have level II oplock because the server can delay mtime
 * change - so we can't make a decision about invalidating the inode.
 * We can also fail the page read if there are mandatory locks on
 * pages affected by this read but not on the region from pos to
 * pos+len-1.
*/
if (!CIFS_CACHE_READ(cinode))
return cifs_user_readv(iocb, to);
if (cap_unix(tcon->ses) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
return generic_file_read_iter(iocb, to);
/*
* We need to hold the sem to be sure nobody modifies lock list
* with a brlock that prevents reading.
*/
down_read(&cinode->lock_sem);
if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
tcon->ses->server->vals->shared_lock_type,
0, NULL, CIFS_READ_OP))
rc = generic_file_read_iter(iocb, to);
up_read(&cinode->lock_sem);
return rc;
}
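/*
 * Synchronous read helper: loop issuing ->sync_read() requests of up
 * to rsize bytes (reopening an invalidated handle and retrying on
 * -EAGAIN) until read_size bytes have been read or the server returns
 * an error or zero bytes.
 */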
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
int rc = -EACCES;
unsigned int bytes_read = 0;
unsigned int total_read;
unsigned int current_read_size;
unsigned int rsize;
struct cifs_sb_info *cifs_sb;
struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
unsigned int xid;
char *cur_offset;
struct cifsFileInfo *open_file;
struct cifs_io_parms io_parms = {0};
int buf_type = CIFS_NO_BUFFER;
__u32 pid;
xid = get_xid();
cifs_sb = CIFS_FILE_SB(file);
/* FIXME: set up handlers for larger reads and/or convert to async */
rsize = min_t(unsigned int, cifs_sb->ctx->rsize, CIFSMaxBufSize);
if (file->private_data == NULL) {
rc = -EBADF;
free_xid(xid);
return rc;
}
open_file = file->private_data;
tcon = tlink_tcon(open_file->tlink);
server = cifs_pick_channel(tcon->ses);
if (!server->ops->sync_read) {
free_xid(xid);
return -ENOSYS;
}
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid;
else
pid = current->tgid;
if ((file->f_flags & O_ACCMODE) == O_WRONLY)
cifs_dbg(FYI, "attempting read on write only file instance\n");
for (total_read = 0, cur_offset = read_data; read_size > total_read;
total_read += bytes_read, cur_offset += bytes_read) {
do {
current_read_size = min_t(uint, read_size - total_read,
rsize);
/*
 * For Windows ME and 9x we do not want to request more than
 * the server negotiated, since it will refuse the read
 * otherwise.
*/
if (!(tcon->ses->capabilities &
tcon->ses->server->vals->cap_large_files)) {
current_read_size = min_t(uint,
current_read_size, CIFSMaxBufSize);
}
if (open_file->invalidHandle) {
rc = cifs_reopen_file(open_file, true);
if (rc != 0)
break;
}
io_parms.pid = pid;
io_parms.tcon = tcon;
io_parms.offset = *offset;
io_parms.length = current_read_size;
io_parms.server = server;
rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
&bytes_read, &cur_offset,
&buf_type);
} while (rc == -EAGAIN);
if (rc || (bytes_read == 0)) {
if (total_read) {
break;
} else {
free_xid(xid);
return rc;
}
} else {
cifs_stats_bytes_read(tcon, total_read);
*offset += bytes_read;
}
}
free_xid(xid);
return total_read;
}
/*
* If the page is mmap'ed into a process' page tables, then we need to make
* sure that it doesn't change while being written back.
*/
static vm_fault_t
cifs_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct file *file = vmf->vma->vm_file;
struct inode *inode = file_inode(file);
cifs_fscache_wait_on_page_write(inode, page);
lock_page(page);
return VM_FAULT_LOCKED;
}
static const struct vm_operations_struct cifs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = cifs_page_mkwrite,
};
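/*
 * mmap for strict cache mode: if we do not hold a read cache on the
 * inode, zap the existing mapping first so that stale cached pages are
 * not exposed through the new mapping.
 */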
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
int xid, rc = 0;
struct inode *inode = file_inode(file);
xid = get_xid();
if (!CIFS_CACHE_READ(CIFS_I(inode)))
rc = cifs_zap_mapping(inode);
if (!rc)
rc = generic_file_mmap(file, vma);
if (!rc)
vma->vm_ops = &cifs_file_vm_ops;
free_xid(xid);
return rc;
}
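/* Revalidate the file, then set up a generic mapping with our vm_ops. */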
int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
int rc, xid;
xid = get_xid();
rc = cifs_revalidate_file(file);
if (rc)
cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
rc);
if (!rc)
rc = generic_file_mmap(file, vma);
if (!rc)
vma->vm_ops = &cifs_file_vm_ops;
free_xid(xid);
return rc;
}
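/*
 * Completion work for a readpages request: add each page to the LRU,
 * mark it up to date (or in error) depending on the result, hand
 * successfully read pages to fscache and drop our references.
 */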
static void
cifs_readv_complete(struct work_struct *work)
{
unsigned int i, got_bytes;
struct cifs_readdata *rdata = container_of(work,
struct cifs_readdata, work);
got_bytes = rdata->got_bytes;
for (i = 0; i < rdata->nr_pages; i++) {
struct page *page = rdata->pages[i];
lru_cache_add(page);
if (rdata->result == 0 ||
(rdata->result == -EAGAIN && got_bytes)) {
flush_dcache_page(page);
SetPageUptodate(page);
} else
SetPageError(page);
unlock_page(page);
if (rdata->result == 0 ||
(rdata->result == -EAGAIN && got_bytes))
cifs_readpage_to_fscache(rdata->mapping->host, page);
else
cifs_fscache_uncache_page(rdata->mapping->host, page);
got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
put_page(page);
rdata->pages[i] = NULL;
}
kref_put(&rdata->refcount, cifs_readdata_release);
}
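/*
 * Fill the pages of a readpages rdata with the data received from the
 * server (or copied from @iter when the response has already been
 * received into a buffer).  A partially filled page is zeroed to its
 * end, pages past the server's EOF are zeroed and marked up to date,
 * and pages for which no data remains are released without being read.
 */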
static int
readpages_fill_pages(struct TCP_Server_Info *server,
struct cifs_readdata *rdata, struct iov_iter *iter,
unsigned int len)
{
int result = 0;
unsigned int i;
u64 eof;
pgoff_t eof_index;
unsigned int nr_pages = rdata->nr_pages;
unsigned int page_offset = rdata->page_offset;
/* determine the eof that the server (probably) has */
eof = CIFS_I(rdata->mapping->host)->server_eof;
eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
rdata->got_bytes = 0;
rdata->tailsz = PAGE_SIZE;
for (i = 0; i < nr_pages; i++) {
struct page *page = rdata->pages[i];
unsigned int to_read = rdata->pagesz;
size_t n;
if (i == 0)
to_read -= page_offset;
else
page_offset = 0;
n = to_read;
if (len >= to_read) {
len -= to_read;
} else if (len > 0) {
/* enough for partial page, fill and zero the rest */
zero_user(page, len + page_offset, to_read - len);
n = rdata->tailsz = len;
len = 0;
} else if (page->index > eof_index) {
/*
* The VFS will not try to do readahead past the
* i_size, but it's possible that we have outstanding
* writes with gaps in the middle and the i_size hasn't
* caught up yet. Populate those with zeroed out pages
* to prevent the VFS from repeatedly attempting to
* fill them until the writes are flushed.
*/
zero_user(page, 0, PAGE_SIZE);
lru_cache_add(page);
flush_dcache_page(page);
SetPageUptodate(page);
unlock_page(page);
put_page(page);
rdata->pages[i] = NULL;
rdata->nr_pages--;
continue;
} else {
/* no need to hold page hostage */
lru_cache_add(page);
unlock_page(page);
put_page(page);
rdata->pages[i] = NULL;
rdata->nr_pages--;
continue;
}
if (iter)
result = copy_page_from_iter(
page, page_offset, n, iter);
#ifdef CONFIG_CIFS_SMB_DIRECT
else if (rdata->mr)
result = n;
#endif
else
result = cifs_read_page_from_socket(
server, page, page_offset, n);
if (result < 0)
break;
rdata->got_bytes += result;
}
return rdata->got_bytes > 0 && result != -ECONNABORTED ?
rdata->got_bytes : result;
}
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
struct cifs_readdata *rdata, unsigned int len)
{
return readpages_fill_pages(server, rdata, NULL, len);
}
static int
cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
struct cifs_readdata *rdata,
struct iov_iter *iter)
{
return readpages_fill_pages(server, rdata, iter, iter->count);
}
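/*
 * Build a single readpages request: add the first page from @page_list
 * to the page cache, then keep pulling pages with consecutive indexes
 * until adding another page would exceed @rsize.  The selected pages
 * are returned on @tmplist along with the starting offset, byte count
 * and page count of the request.
 */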
static int
readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
unsigned int rsize, struct list_head *tmplist,
unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
{
struct page *page, *tpage;
unsigned int expected_index;
int rc;
gfp_t gfp = readahead_gfp_mask(mapping);
INIT_LIST_HEAD(tmplist);
page = lru_to_page(page_list);
/*
* Lock the page and put it in the cache. Since no one else
* should have access to this page, we're safe to simply set
* PG_locked without checking it first.
*/
__SetPageLocked(page);
rc = add_to_page_cache_locked(page, mapping,
page->index, gfp);
/* give up if we can't stick it in the cache */
if (rc) {
__ClearPageLocked(page);
return rc;
}
/* move first page to the tmplist */
*offset = (loff_t)page->index << PAGE_SHIFT;
*bytes = PAGE_SIZE;
*nr_pages = 1;
list_move_tail(&page->lru, tmplist);
/* now try and add more pages onto the request */
expected_index = page->index + 1;
list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
/* discontinuity ? */
if (page->index != expected_index)
break;
/* would this page push the read over the rsize? */
if (*bytes + PAGE_SIZE > rsize)
break;
__SetPageLocked(page);
rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
if (rc) {
__ClearPageLocked(page);
break;
}
list_move_tail(&page->lru, tmplist);
(*bytes) += PAGE_SIZE;
expected_index++;
(*nr_pages)++;
}
return rc;
}
static int cifs_readpages(struct file *file, struct address_space *mapping,
struct list_head *page_list, unsigned num_pages)
{
int rc;
int err = 0;
struct list_head tmplist;
struct cifsFileInfo *open_file = file->private_data;
struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
struct TCP_Server_Info *server;
pid_t pid;
unsigned int xid;
xid = get_xid();
/*
* Reads as many pages as possible from fscache. Returns -ENOBUFS
* immediately if the cookie is negative
*
* After this point, every page in the list might have PG_fscache set,
* so we will need to clean that up off of every page we don't use.
*/
rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
&num_pages);
if (rc == 0) {
free_xid(xid);
return rc;
}
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid;
else
pid = current->tgid;
rc = 0;
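/*
 * Pick the channel up front so that credit handling and any reconnect
 * are done against the same server that will carry the async reads.
 */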
server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
__func__, file, mapping, num_pages);
/*
* Start with the page at end of list and move it to private
* list. Do the same with any following pages until we hit
* the rsize limit, hit an index discontinuity, or run out of
* pages. Issue the async read and then start the loop again
* until the list is empty.
*
* Note that list order is important. The page_list is in
* the order of declining indexes. When we put the pages in
* the rdata->pages, then we want them in increasing order.
*/
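/*
 * Stop looping as soon as readpages_get_pages() fails to add a page to
 * the page cache; retrying with the same list head would add the page
 * a second time and trip VM_BUG_ON_PAGE() in the memcg charge path.
 */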
while (!list_empty(page_list) && !err) {
unsigned int i, nr_pages, bytes, rsize;
loff_t offset;
struct page *page, *tpage;
struct cifs_readdata *rdata;
struct cifs_credits credits_on_stack;
struct cifs_credits *credits = &credits_on_stack;
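/* Reopen a stale file handle before asking for credits; -EAGAIN means retry */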
if (open_file->invalidHandle) {
rc = cifs_reopen_file(open_file, true);
if (rc == -EAGAIN)
continue;
else if (rc)
break;
}
rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
&rsize, credits);
if (rc)
break;
/*
* Give up immediately if rsize is too small to read an entire
* page. The VFS will fall back to readpage. We should never
* reach this point however since we set ra_pages to 0 when the
* rsize is smaller than a cache page.
*/
if (unlikely(rsize < PAGE_SIZE)) {
add_credits_and_wake_if(server, credits, 0);
free_xid(xid);
return 0;
}
nr_pages = 0;
err = readpages_get_pages(mapping, page_list, rsize, &tmplist,
&nr_pages, &offset, &bytes);
if (!nr_pages) {
add_credits_and_wake_if(server, credits, 0);
break;
}
rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
if (!rdata) {
/* best to give up if we're out of mem */
list_for_each_entry_safe(page, tpage, &tmplist, lru) {
list_del(&page->lru);
lru_cache_add(page);
unlock_page(page);
put_page(page);
}
rc = -ENOMEM;
add_credits_and_wake_if(server, credits, 0);
break;
}
rdata->cfile = cifsFileInfo_get(open_file);
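/*
 * rdata->server is later copied to cifs_io_parms->server so the async
 * read goes out on the channel picked above.
 */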
rdata->server = server;
rdata->mapping = mapping;
rdata->offset = offset;
rdata->bytes = bytes;
rdata->pid = pid;
rdata->pagesz = PAGE_SIZE;
rdata->tailsz = PAGE_SIZE;
rdata->read_into_pages = cifs_readpages_read_into_pages;
rdata->copy_into_pages = cifs_readpages_copy_into_pages;
rdata->credits = credits_on_stack;
list_for_each_entry_safe(page, tpage, &tmplist, lru) {
list_del(&page->lru);
rdata->pages[rdata->nr_pages++] = page;
}
rc = adjust_credits(server, &rdata->credits, rdata->bytes);
if (!rc) {
if (rdata->cfile->invalidHandle)
rc = -EAGAIN;
else
rc = server->ops->async_readv(rdata);
}
if (rc) {
add_credits_and_wake_if(server, &rdata->credits, 0);
for (i = 0; i < rdata->nr_pages; i++) {
page = rdata->pages[i];
lru_cache_add(page);
unlock_page(page);
put_page(page);
}
/* Fallback to the readpage in error/reconnect cases */
kref_put(&rdata->refcount, cifs_readdata_release);
break;
}
kref_put(&rdata->refcount, cifs_readdata_release);
}
/* Any pages that have been shown to fscache but didn't get added to
* the pagecache must be uncached before they get returned to the
* allocator.
*/
cifs_fscache_readpages_cancel(mapping->host, page_list);
free_xid(xid);
return rc;
}
/*
* cifs_readpage_worker must be called with the page pinned
*/
static int cifs_readpage_worker(struct file *file, struct page *page,
loff_t *poffset)
{
char *read_data;
int rc;
/* Is the page cached? */
rc = cifs_readpage_from_fscache(file_inode(file), page);
if (rc == 0)
goto read_complete;
read_data = kmap(page);
/* for reads over a certain size could initiate async read ahead */
rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
if (rc < 0)
goto io_error;
else
cifs_dbg(FYI, "Bytes read %d\n", rc);
/* we do not want atime to be less than mtime, it broke some apps */
file_inode(file)->i_atime = current_time(file_inode(file));
if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)))
file_inode(file)->i_atime = file_inode(file)->i_mtime;
else
file_inode(file)->i_atime = current_time(file_inode(file));
if (PAGE_SIZE > rc)
memset(read_data + rc, 0, PAGE_SIZE - rc);
flush_dcache_page(page);
SetPageUptodate(page);
/* send this page to the cache */
cifs_readpage_to_fscache(file_inode(file), page);
rc = 0;
io_error:
kunmap(page);
unlock_page(page);
read_complete:
return rc;
}
static int cifs_readpage(struct file *file, struct page *page)
{
loff_t offset = page_file_offset(page);
int rc = -EACCES;
unsigned int xid;
xid = get_xid();
if (file->private_data == NULL) {
rc = -EBADF;
free_xid(xid);
return rc;
}
cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
page, (int)offset, (int)offset);
rc = cifs_readpage_worker(file, page, &offset);
free_xid(xid);
return rc;
}
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
struct cifsFileInfo *open_file;
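/* openFileList is protected by the per-inode open_file_lock */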
spin_lock(&cifs_inode->open_file_lock);
list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
spin_unlock(&cifs_inode->open_file_lock);
return 1;
}
}
spin_unlock(&cifs_inode->open_file_lock);
return 0;
}
/* We do not want to update the file size from server for inodes
open for write - to avoid races with writepage extending
the file - in the future we could consider allowing
refreshing the inode only on increases in the file size
but this is tricky to do without racing with writebehind
page caching in the current Linux kernel design */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
if (!cifsInode)
return true;
if (is_inode_writable(cifsInode)) {
/* This inode is open for write at least once */
struct cifs_sb_info *cifs_sb;
cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
/* since no page cache to corrupt on directio
we can change size safely */
return true;
}
if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
return true;
return false;
} else
return true;
}
static int cifs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
int oncethru = 0;
pgoff_t index = pos >> PAGE_SHIFT;
loff_t offset = pos & (PAGE_SIZE - 1);
loff_t page_start = pos & PAGE_MASK;
loff_t i_size;
struct page *page;
int rc = 0;
cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
start:
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page) {
rc = -ENOMEM;
goto out;
}
if (PageUptodate(page))
goto out;
/*
* If we write a full page it will be up to date, no need to read from
* the server. If the write is short, we'll end up doing a sync write
* instead.
*/
if (len == PAGE_SIZE)
goto out;
/*
* optimize away the read when we have an oplock, and we're not
* expecting to use any of the data we'd be reading in. That
* is, when the page lies beyond the EOF, or straddles the EOF
* and the write will cover all of the existing data.
*/
if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
i_size = i_size_read(mapping->host);
if (page_start >= i_size ||
(offset == 0 && (pos + len) >= i_size)) {
zero_user_segments(page, 0, offset,
offset + len,
PAGE_SIZE);
/*
* PageChecked means that the parts of the page
* to which we're not writing are considered up
* to date. Once the data is copied to the
* page, it can be set uptodate.
*/
SetPageChecked(page);
goto out;
}
}
if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
/*
* might as well read a page, it is fast enough. If we get
* an error, we don't need to return it. cifs_write_end will
* do a sync write instead since PG_uptodate isn't set.
*/
cifs_readpage_worker(file, page, &page_start);
put_page(page);
oncethru = 1;
goto start;
} else {
/*
 * We could try using another file handle if there is one, but how
 * would we lock it to prevent a close of that handle racing with this
 * read? In any case, the data will be written out by write_end, so
 * this is fine.
 */
}
out:
*pagep = page;
return rc;
}
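/*
 * ->releasepage: a page that still carries private data cannot be
 * released; otherwise defer to fscache to decide whether its reference
 * to the page can be dropped.
 */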
static int cifs_release_page(struct page *page, gfp_t gfp)
{
if (PagePrivate(page))
return 0;
return cifs_fscache_release_page(page, gfp);
}
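/*
 * ->invalidatepage: notify fscache only when the whole page is being
 * invalidated; partial invalidation leaves the cached copy in place.
 */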
static void cifs_invalidate_page(struct page *page, unsigned int offset,
unsigned int length)
{
struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
if (offset == 0 && length == PAGE_SIZE)
cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}
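/*
 * ->launder_page: write a dirty page back synchronously before it is
 * invalidated, then drop the fscache copy of it.
 */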
static int cifs_launder_page(struct page *page)
{
int rc = 0;
loff_t range_start = page_offset(page);
loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = 0,
.range_start = range_start,
.range_end = range_end,
};
cifs_dbg(FYI, "Launder page: %p\n", page);
if (clear_page_dirty_for_io(page))
rc = cifs_writepage_locked(page, &wbc);
cifs_fscache_invalidate_page(page, page->mapping->host);
return rc;
}
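/*
 * Work item run when the server breaks an oplock/lease: wait for pending
 * writers, downgrade the cached oplock state, flush (and if necessary
 * invalidate) the page cache, push byte-range locks, handle any pending
 * deferred close, and finally acknowledge the break to the server.
 */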
void cifs_oplock_break(struct work_struct *work)
{
struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
oplock_break);
struct inode *inode = d_inode(cfile->dentry);
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
int rc = 0;
bool purge_cache = false;
bool is_deferred = false;
struct cifs_deferred_close *dclose;
wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
TASK_UNINTERRUPTIBLE);
server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
cfile->oplock_epoch, &purge_cache);
if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
cifs_has_mand_locks(cinode)) {
cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
inode);
cinode->oplock = 0;
}
if (inode && S_ISREG(inode->i_mode)) {
if (CIFS_CACHE_READ(cinode))
break_lease(inode, O_RDONLY);
else
break_lease(inode, O_WRONLY);
rc = filemap_fdatawrite(inode->i_mapping);
if (!CIFS_CACHE_READ(cinode) || purge_cache) {
rc = filemap_fdatawait(inode->i_mapping);
mapping_set_error(inode->i_mapping, rc);
cifs_zap_mapping(inode);
}
cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
if (CIFS_CACHE_WRITE(cinode))
goto oplock_break_ack;
}
rc = cifs_push_locks(cfile);
if (rc)
cifs_dbg(VFS, "Push locks rc = %d\n", rc);
oplock_break_ack:
/*
* When an oplock break is received and there are no active file
* handles, only a cached one, perform the deferred close immediately
* so that a new open will not use the cached handle.
*/
spin_lock(&CIFS_I(inode)->deferred_lock);
is_deferred = cifs_is_deferred_close(cfile, &dclose);
spin_unlock(&CIFS_I(inode)->deferred_lock);
if (is_deferred &&
cfile->deferred_close_scheduled &&
delayed_work_pending(&cfile->deferred)) {
if (cancel_delayed_work(&cfile->deferred)) {
_cifsFileInfo_put(cfile, false, false);
goto oplock_break_done;
}
}
/*
* Releasing a stale oplock after a recent reconnect of the SMB session,
* using a now-incorrect file handle, is not a data integrity issue.
* Still, do not bother sending an oplock release if the session to the
* server is still disconnected, since the server has already released
* the oplock.
*/
if (!cfile->oplock_break_cancelled) {
rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
cinode);
cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
}
oplock_break_done:
_cifsFileInfo_put(cfile, false /* do not wait for ourselves */, false);
cifs_done_oplock_break(cinode);
}
/*
* The presence of cifs_direct_io() in the address space ops vector
* allows open() with the O_DIRECT flag, which would have failed otherwise.
*
* In the non-cached mode (mount with cache=none), we shunt off direct
* read and write requests, so this method should never be called.
*
* Direct I/O is not yet supported in the cached mode.
*/
static ssize_t
cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
/*
* FIXME
* Eventually need to support direct IO for non forcedirectio mounts
*/
return -EINVAL;
}
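/*
 * ->swap_activate: make sure the prospective swapfile has no holes
 * (i_blocks must cover i_size), report its length via *span, and mark
 * the open file as a swapfile.
 */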
static int cifs_swap_activate(struct swap_info_struct *sis,
struct file *swap_file, sector_t *span)
{
struct cifsFileInfo *cfile = swap_file->private_data;
struct inode *inode = swap_file->f_mapping->host;
unsigned long blocks;
long long isize;
cifs_dbg(FYI, "swap activate\n");
spin_lock(&inode->i_lock);
blocks = inode->i_blocks;
isize = inode->i_size;
spin_unlock(&inode->i_lock);
if (blocks*512 < isize) {
pr_warn("swap activate: swapfile has holes\n");
return -EINVAL;
}
*span = sis->pages;
pr_warn_once("Swap support over SMB3 is experimental\n");
/*
* TODO: consider adding an ACL (or documenting how) to prevent other
* users (on this or other systems) from reading it
*/
/* TODO: add sk_set_memalloc(inet) or similar */
if (cfile)
cfile->swapfile = true;
/*
* TODO: Since the file is already open, we can't open it with DENY_ALL
* here, but we could add a call to grab a byte-range lock to prevent
* others from reading or writing the file
*/
return 0;
}
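/* ->swap_deactivate: clear the swapfile flag set by cifs_swap_activate */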
static void cifs_swap_deactivate(struct file *file)
{
struct cifsFileInfo *cfile = file->private_data;
cifs_dbg(FYI, "swap deactivate\n");
/* TODO: undo sk_set_memalloc(inet) will eventually be needed */
if (cfile)
cfile->swapfile = false;
/* do we need to unpin (or unlock) the file? */
}
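/*
 * Default address space operations, used when the server supports a
 * buffer large enough for cifs_readpages; cifs_addr_ops_smallbuf below
 * omits ->readpages for servers that do not.
 */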
const struct address_space_operations cifs_addr_ops = {
.readpage = cifs_readpage,
.readpages = cifs_readpages,
.writepage = cifs_writepage,
.writepages = cifs_writepages,
.write_begin = cifs_write_begin,
.write_end = cifs_write_end,
.set_page_dirty = __set_page_dirty_nobuffers,
.releasepage = cifs_release_page,
.direct_IO = cifs_direct_io,
.invalidatepage = cifs_invalidate_page,
.launder_page = cifs_launder_page,
/*
* TODO: investigate whether a cifs_migratePage helper (under
* CONFIG_MIGRATION) would be useful in the future, and also whether an
* is_dirty_writeback helper is needed
*/
.swap_activate = cifs_swap_activate,
.swap_deactivate = cifs_swap_deactivate,
};
/*
* cifs_readpages requires the server to support a buffer large enough to
* contain the header plus one complete page of data. Otherwise, we need
* to leave cifs_readpages out of the address space operations.
*/
const struct address_space_operations cifs_addr_ops_smallbuf = {
.readpage = cifs_readpage,
.writepage = cifs_writepage,
.writepages = cifs_writepages,
.write_begin = cifs_write_begin,
.write_end = cifs_write_end,
.set_page_dirty = __set_page_dirty_nobuffers,
.releasepage = cifs_release_page,
.invalidatepage = cifs_invalidate_page,
.launder_page = cifs_launder_page,
};