Merge branch 'writeback-for-2.6.34' into nfs-for-2.6.34
@@ -97,22 +97,6 @@ u64 nfs_compat_user_ino64(u64 fileid)
         return ino;
 }
 
-int nfs_write_inode(struct inode *inode, int sync)
-{
-        int ret;
-
-        if (sync) {
-                ret = filemap_fdatawait(inode->i_mapping);
-                if (ret == 0)
-                        ret = nfs_commit_inode(inode, FLUSH_SYNC);
-        } else
-                ret = nfs_commit_inode(inode, 0);
-        if (ret >= 0)
-                return 0;
-        __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
-        return ret;
-}
-
 void nfs_clear_inode(struct inode *inode)
 {
         /*
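
The hunk above removes nfs_write_inode() from this file. It implements the NFS ->write_inode superblock operation, which the generic writeback code calls to push a dirty inode's unstable data back to the server; where the writeback branch relocates or reworks it is not visible in this hunk. For reference, an annotated restatement of the removed flush-then-commit logic (a sketch only, keeping the old int-sync signature):

    /* Sketch: annotated restatement of the helper removed above. */
    static int removed_nfs_write_inode(struct inode *inode, int sync)
    {
            int ret;

            if (sync) {
                    /* Synchronous case: wait for in-flight WRITEs, then force a
                     * COMMIT so the server flushes its unstable data to disk. */
                    ret = filemap_fdatawait(inode->i_mapping);
                    if (ret == 0)
                            ret = nfs_commit_inode(inode, FLUSH_SYNC);
            } else
                    /* Asynchronous case: issue a COMMIT without waiting for it. */
                    ret = nfs_commit_inode(inode, 0);
            if (ret >= 0)
                    return 0;
            /* The commit failed: keep the inode dirty so writeback retries. */
            __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
            return ret;
    }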
@@ -130,16 +114,12 @@ void nfs_clear_inode(struct inode *inode)
  */
 int nfs_sync_mapping(struct address_space *mapping)
 {
-        int ret;
+        int ret = 0;
 
-        if (mapping->nrpages == 0)
-                return 0;
-        unmap_mapping_range(mapping, 0, 0, 0);
-        ret = filemap_write_and_wait(mapping);
-        if (ret != 0)
-                goto out;
-        ret = nfs_wb_all(mapping->host);
-out:
+        if (mapping->nrpages != 0) {
+                unmap_mapping_range(mapping, 0, 0, 0);
+                ret = nfs_wb_all(mapping->host);
+        }
         return ret;
 }
 
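
Assembled from the context and added lines above, the simplified nfs_sync_mapping() now reads roughly as follows; the separate filemap_write_and_wait() step is dropped on the assumption that nfs_wb_all() writes back and waits for everything itself:

    /* Resulting shape of nfs_sync_mapping() after the hunk above. */
    int nfs_sync_mapping(struct address_space *mapping)
    {
            int ret = 0;

            if (mapping->nrpages != 0) {
                    /* Zap shared mmap()ed ranges, then write back and commit
                     * every dirty page belonging to the inode. */
                    unmap_mapping_range(mapping, 0, 0, 0);
                    ret = nfs_wb_all(mapping->host);
            }
            return ret;
    }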
@@ -511,17 +491,11 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
         int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
         int err;
 
-        /*
-         * Flush out writes to the server in order to update c/mtime.
-         *
-         * Hold the i_mutex to suspend application writes temporarily;
-         * this prevents long-running writing applications from blocking
-         * nfs_wb_nocommit.
-         */
+        /* Flush out writes to the server in order to update c/mtime. */
         if (S_ISREG(inode->i_mode)) {
-                mutex_lock(&inode->i_mutex);
-                nfs_wb_nocommit(inode);
-                mutex_unlock(&inode->i_mutex);
+                err = filemap_write_and_wait(inode->i_mapping);
+                if (err)
+                        goto out;
         }
 
         /*
@@ -545,6 +519,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
                 generic_fillattr(inode, stat);
                 stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode));
         }
+out:
         return err;
 }
 
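
Taken together, the two nfs_getattr() hunks drop the i_mutex/nfs_wb_nocommit() sequence in favour of a plain filemap_write_and_wait() and add an out: label so a flush failure can return immediately. A condensed sketch of the affected control flow, with the attribute-revalidation code in the middle of the function elided:

    /* Flush out writes to the server in order to update c/mtime. */
    if (S_ISREG(inode->i_mode)) {
            err = filemap_write_and_wait(inode->i_mapping);
            if (err)
                    goto out;       /* bail out early if the flush fails */
    }

    /* ... attribute revalidation and generic_fillattr() as before ... */

    out:
            return err;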
@@ -574,14 +549,14 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync)
         nfs_revalidate_inode(server, inode);
 }
 
-static struct nfs_open_context *alloc_nfs_open_context(struct vfsmount *mnt, struct dentry *dentry, struct rpc_cred *cred)
+static struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct rpc_cred *cred)
 {
         struct nfs_open_context *ctx;
 
         ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
         if (ctx != NULL) {
-                ctx->path.dentry = dget(dentry);
-                ctx->path.mnt = mntget(mnt);
+                ctx->path = *path;
+                path_get(&ctx->path);
                 ctx->cred = get_rpccred(cred);
                 ctx->state = NULL;
                 ctx->lockowner = current->files;
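
From the added lines, the open-context allocator now takes a struct path and pins it with a single path_get() instead of separate dget()/mntget() calls; the nfs_open() hunk below shows the matching call-site change to &filp->f_path. A sketch of the resulting allocator, with the remaining field initialisation elided:

    static struct nfs_open_context *alloc_nfs_open_context(struct path *path,
                                                            struct rpc_cred *cred)
    {
            struct nfs_open_context *ctx;

            ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
            if (ctx != NULL) {
                    ctx->path = *path;              /* copy dentry + vfsmount together */
                    path_get(&ctx->path);           /* one reference covers both */
                    ctx->cred = get_rpccred(cred);
                    ctx->state = NULL;
                    ctx->lockowner = current->files;
                    /* ... remaining fields initialised as before ... */
            }
            return ctx;
    }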
@@ -681,7 +656,7 @@ int nfs_open(struct inode *inode, struct file *filp)
         cred = rpc_lookup_cred();
         if (IS_ERR(cred))
                 return PTR_ERR(cred);
-        ctx = alloc_nfs_open_context(filp->f_path.mnt, filp->f_path.dentry, cred);
+        ctx = alloc_nfs_open_context(&filp->f_path, cred);
         put_rpccred(cred);
         if (ctx == NULL)
                 return -ENOMEM;
@@ -774,7 +749,7 @@ int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
         return __nfs_revalidate_inode(server, inode);
 }
 
-static int nfs_invalidate_mapping_nolock(struct inode *inode, struct address_space *mapping)
+static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping)
 {
         struct nfs_inode *nfsi = NFS_I(inode);
 
@@ -795,49 +770,10 @@ static int nfs_invalidate_mapping_nolock(struct inode *inode, struct address_spa
         return 0;
 }
 
-static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping)
-{
-        int ret = 0;
-
-        mutex_lock(&inode->i_mutex);
-        if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_DATA) {
-                ret = nfs_sync_mapping(mapping);
-                if (ret == 0)
-                        ret = nfs_invalidate_mapping_nolock(inode, mapping);
-        }
-        mutex_unlock(&inode->i_mutex);
-        return ret;
-}
-
-/**
- * nfs_revalidate_mapping_nolock - Revalidate the pagecache
- * @inode - pointer to host inode
- * @mapping - pointer to mapping
- */
-int nfs_revalidate_mapping_nolock(struct inode *inode, struct address_space *mapping)
-{
-        struct nfs_inode *nfsi = NFS_I(inode);
-        int ret = 0;
-
-        if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
-                        || nfs_attribute_timeout(inode) || NFS_STALE(inode)) {
-                ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
-                if (ret < 0)
-                        goto out;
-        }
-        if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
-                ret = nfs_invalidate_mapping_nolock(inode, mapping);
-out:
-        return ret;
-}
-
 /**
  * nfs_revalidate_mapping - Revalidate the pagecache
  * @inode - pointer to host inode
  * @mapping - pointer to mapping
- *
- * This version of the function will take the inode->i_mutex and attempt to
- * flush out all dirty data if it needs to invalidate the page cache.
  */
 int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
 {
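
With the mutex-taking wrapper and the _nolock variant removed here, and nfs_invalidate_mapping_nolock() renamed to nfs_invalidate_mapping() in the preceding hunk, nfs_revalidate_mapping() presumably absorbs the former _nolock body. The merged body is not shown in this hunk; a sketch of the assumed result, built from the removed lines above:

    /* Assumed consolidated form of nfs_revalidate_mapping(); sketch only. */
    int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
    {
            struct nfs_inode *nfsi = NFS_I(inode);
            int ret = 0;

            /* Refresh attributes first if the cached ones may be stale. */
            if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
                            || nfs_attribute_timeout(inode) || NFS_STALE(inode)) {
                    ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
                    if (ret < 0)
                            goto out;
            }
            /* Then drop the page cache if the file data was marked invalid. */
            if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
                    ret = nfs_invalidate_mapping(inode, mapping);
    out:
            return ret;
    }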
@@ -1415,6 +1351,7 @@ static void init_once(void *foo)
         INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
         INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC);
         nfsi->npages = 0;
+        nfsi->ncommit = 0;
         atomic_set(&nfsi->silly_count, 1);
         INIT_HLIST_HEAD(&nfsi->silly_list);
         init_waitqueue_head(&nfsi->waitqueue);
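
init_once() is the constructor for the nfs_inode slab cache, so the new ncommit field (presumably a count of pages awaiting an NFS COMMIT) is zeroed alongside npages for every object the cache creates. A hypothetical sketch of how such a constructor gets registered; the cache and function names below are illustrative, not the actual setup code in this file:

    #include <linux/slab.h>

    static struct kmem_cache *example_inode_cachep;         /* illustrative name */

    static int __init example_init_inodecache(void)
    {
            /* The constructor runs once per slab object, so newly created
             * nfs_inode objects start with npages == 0 and ncommit == 0. */
            example_inode_cachep = kmem_cache_create("example_nfs_inode_cache",
                                                     sizeof(struct nfs_inode), 0,
                                                     SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT,
                                                     init_once);
            if (example_inode_cachep == NULL)
                    return -ENOMEM;
            return 0;
    }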