Merge tag 'gfs2-4.7.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 updates from Bob Peterson:
 "We've got ten patches this time, half of which are related to a
  plethora of nasty outcomes when inodes are transitioned from the
  unlinked state to the free state. Small file systems are particularly
  vulnerable to these problems, and they can manifest mainly as hangs,
  but also as file system corruption. The patches have been tested for
  literally many weeks, with a very gruelling test, so I have a high
  level of confidence.

  - Andreas Gruenbacher wrote a series of five patches for various
    lockups during the transition of inodes from unlinked to free.

    The main patch is titled "Fix gfs2_lookup_by_inum lock inversion"
    and the other four are support and cleanup patches related to that.

  - Ben Marzinski contributed two patches with regard to a recreatable
    problem when gfs2 tries to write a page to a file that is being
    truncated, resulting in a BUG() in gfs2_remove_from_journal.

    Note that Ben had to export vfs function __block_write_full_page to
    get this to work properly. It's been posted a long time and he
    talked to various VFS people about it, and nobody seemed to mind.

  - I contributed three patches:

      o The first one fixes a memory corruptor: a race in which one
        process can overwrite the gl_object pointer set by another
        process, causing kernel panic and other symptoms.

      o The second patch fixes another race that resulted in a
        false-positive BUG_ON. This occurred when resource group
        reservations were freed by one process while another process
        was trying to grab a new reservation in the same resource
        group.

      o The third patch fixes a problem with doing journal replay when
        the journals are not all the same size"

* tag 'gfs2-4.7.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
  GFS2: Fix gfs2_replay_incr_blk for multiple journal sizes
  GFS2: Check rs_free with rd_rsspin protection
  gfs2: writeout truncated pages
  fs: export __block_write_full_page
  gfs2: Lock holder cleanup
  gfs2: Large-filesystem fix for 32-bit systems
  gfs2: Get rid of gfs2_ilookup
  gfs2: Fix gfs2_lookup_by_inum lock inversion
  gfs2: Initialize iopen glock holder for new inodes
  GFS2: don't set rgrp gl_object until it's inserted into rgrp tree
commit b403f23044
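As background for the truncated-page writeout fix ("gfs2: writeout truncated
pages"), the sketch below restates, in plain standalone C, the page/i_size
arithmetic that gfs2_write_full_page() performs in the diff that follows: the
page straddling i_size must have its tail zeroed on every writepage call
because the file may be mmapped. This is an illustrative sketch only; the
helper name and the fixed 4096-byte page size are assumptions for the example,
not part of the kernel patch:

    #include <stdint.h>
    #include <string.h>

    #define PAGE_SIZE 4096ULL   /* assumed page size for the example */

    /* Zero the part of the EOF page that lies beyond i_size.  Mirrors the
     * offset/end_index calculation in the gfs2_write_full_page() hunk below. */
    static void zero_tail_of_eof_page(unsigned char *page, uint64_t page_index,
                                      uint64_t i_size)
    {
        uint64_t end_index = i_size / PAGE_SIZE;        /* page holding EOF */
        unsigned int offset = i_size & (PAGE_SIZE - 1); /* valid bytes in it */

        if (page_index == end_index && offset)
            memset(page + offset, 0, PAGE_SIZE - offset);
    }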
@@ -1687,7 +1687,7 @@ static struct buffer_head *create_page_buffers(struct page *page, struct inode *
  * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
  * causes the writes to be flagged as synchronous writes.
  */
-static int __block_write_full_page(struct inode *inode, struct page *page,
+int __block_write_full_page(struct inode *inode, struct page *page,
             get_block_t *get_block, struct writeback_control *wbc,
             bh_end_io_t *handler)
 {
@@ -1848,6 +1848,7 @@ recover:
     unlock_page(page);
     goto done;
 }
+EXPORT_SYMBOL(__block_write_full_page);
 
 /*
  * If a page has any new buffers, zero them out here, and mark them uptodate
@@ -140,6 +140,32 @@ static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
     return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
 }
 
+/* This is the same as calling block_write_full_page, but it also
+ * writes pages outside of i_size
+ */
+int gfs2_write_full_page(struct page *page, get_block_t *get_block,
+             struct writeback_control *wbc)
+{
+    struct inode * const inode = page->mapping->host;
+    loff_t i_size = i_size_read(inode);
+    const pgoff_t end_index = i_size >> PAGE_SHIFT;
+    unsigned offset;
+
+    /*
+     * The page straddles i_size. It must be zeroed out on each and every
+     * writepage invocation because it may be mmapped. "A file is mapped
+     * in multiples of the page size. For a file that is not a multiple of
+     * the page size, the remaining memory is zeroed when mapped, and
+     * writes to that region are not written out to the file."
+     */
+    offset = i_size & (PAGE_SIZE-1);
+    if (page->index == end_index && offset)
+        zero_user_segment(page, offset, PAGE_SIZE);
+
+    return __block_write_full_page(inode, page, get_block, wbc,
+                       end_buffer_async_write);
+}
+
 /**
  * __gfs2_jdata_writepage - The core of jdata writepage
  * @page: The page to write
@@ -165,7 +191,7 @@ static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *w
         }
         gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
     }
-    return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
+    return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
 }
 
 /**
@@ -180,27 +206,20 @@ static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *w
 static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
 {
     struct inode *inode = page->mapping->host;
+    struct gfs2_inode *ip = GFS2_I(inode);
     struct gfs2_sbd *sdp = GFS2_SB(inode);
     int ret;
-    int done_trans = 0;
 
-    if (PageChecked(page)) {
-        if (wbc->sync_mode != WB_SYNC_ALL)
-            goto out_ignore;
-        ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
-        if (ret)
-            goto out_ignore;
-        done_trans = 1;
-    }
-    ret = gfs2_writepage_common(page, wbc);
-    if (ret > 0)
-        ret = __gfs2_jdata_writepage(page, wbc);
-    if (done_trans)
-        gfs2_trans_end(sdp);
+    if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
+        goto out;
+    if (PageChecked(page) || current->journal_info)
+        goto out_ignore;
+    ret = __gfs2_jdata_writepage(page, wbc);
     return ret;
 
 out_ignore:
     redirty_page_for_writepage(wbc, page);
+out:
     unlock_page(page);
     return 0;
 }
@@ -117,7 +117,7 @@ static int gfs2_dentry_delete(const struct dentry *dentry)
         return 0;
 
     ginode = GFS2_I(d_inode(dentry));
-    if (!ginode->i_iopen_gh.gh_gl)
+    if (!gfs2_holder_initialized(&ginode->i_iopen_gh))
         return 0;
 
     if (test_bit(GLF_DEMOTE, &ginode->i_iopen_gh.gh_gl->gl_flags))
@@ -1663,7 +1663,8 @@ struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name,
             brelse(bh);
             if (fail_on_exist)
                 return ERR_PTR(-EEXIST);
-            inode = gfs2_inode_lookup(dir->i_sb, dtype, addr, formal_ino);
+            inode = gfs2_inode_lookup(dir->i_sb, dtype, addr, formal_ino,
+                          GFS2_BLKST_FREE /* ignore */);
             if (!IS_ERR(inode))
                 GFS2_I(inode)->i_rahead = rahead;
             return inode;
@@ -137,21 +137,10 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb,
     struct gfs2_sbd *sdp = sb->s_fs_info;
     struct inode *inode;
 
-    inode = gfs2_ilookup(sb, inum->no_addr);
-    if (inode) {
-        if (GFS2_I(inode)->i_no_formal_ino != inum->no_formal_ino) {
-            iput(inode);
-            return ERR_PTR(-ESTALE);
-        }
-        goto out_inode;
-    }
-
     inode = gfs2_lookup_by_inum(sdp, inum->no_addr, &inum->no_formal_ino,
                     GFS2_BLKST_DINODE);
     if (IS_ERR(inode))
         return ERR_CAST(inode);
-
-out_inode:
     return d_obtain_alias(inode);
 }
 
@@ -1098,7 +1098,7 @@ static void do_unflock(struct file *file, struct file_lock *fl)
 
     mutex_lock(&fp->f_fl_mutex);
     locks_lock_file_wait(file, fl);
-    if (fl_gh->gh_gl) {
+    if (gfs2_holder_initialized(fl_gh)) {
         gfs2_glock_dq(fl_gh);
         gfs2_holder_uninit(fl_gh);
     }
@@ -575,7 +575,6 @@ static void delete_work_func(struct work_struct *work)
 {
     struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
     struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-    struct gfs2_inode *ip;
     struct inode *inode;
     u64 no_addr = gl->gl_name.ln_number;
 
@@ -585,13 +584,7 @@ static void delete_work_func(struct work_struct *work)
     if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
         goto out;
 
-    ip = gl->gl_object;
-    /* Note: Unsafe to dereference ip as we don't hold right refs/locks */
-
-    if (ip)
-        inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
-    else
-        inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
+    inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
     if (inode && !IS_ERR(inode)) {
         d_prune_aliases(inode);
         iput(inode);
@@ -808,7 +801,7 @@ void gfs2_holder_uninit(struct gfs2_holder *gh)
 {
     put_pid(gh->gh_owner_pid);
     gfs2_glock_put(gh->gh_gl);
-    gh->gh_gl = NULL;
+    gfs2_holder_mark_uninitialized(gh);
     gh->gh_ip = 0;
 }
 
@@ -247,4 +247,14 @@ extern void gfs2_unregister_debugfs(void);
 
 extern const struct lm_lockops gfs2_dlm_ops;
 
+static inline void gfs2_holder_mark_uninitialized(struct gfs2_holder *gh)
+{
+    gh->gh_gl = NULL;
+}
+
+static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
+{
+    return gh->gh_gl;
+}
+
 #endif /* __GLOCK_DOT_H__ */
fs/gfs2/inode.c (128 changed lines)
@@ -37,9 +37,35 @@
 #include "super.h"
 #include "glops.h"
 
-struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
+static int iget_test(struct inode *inode, void *opaque)
 {
-    return ilookup(sb, (unsigned long)no_addr);
+    u64 no_addr = *(u64 *)opaque;
+
+    return GFS2_I(inode)->i_no_addr == no_addr;
 }
 
+static int iget_set(struct inode *inode, void *opaque)
+{
+    u64 no_addr = *(u64 *)opaque;
+
+    GFS2_I(inode)->i_no_addr = no_addr;
+    inode->i_ino = no_addr;
+    return 0;
+}
+
+static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
+{
+    struct inode *inode;
+
+repeat:
+    inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr);
+    if (!inode)
+        return inode;
+    if (is_bad_inode(inode)) {
+        iput(inode);
+        goto repeat;
+    }
+    return inode;
+}
+
 /**
@@ -78,26 +104,37 @@ static void gfs2_set_iop(struct inode *inode)
 /**
  * gfs2_inode_lookup - Lookup an inode
  * @sb: The super block
- * @no_addr: The inode number
  * @type: The type of the inode
+ * @no_addr: The inode number
+ * @no_formal_ino: The inode generation number
+ * @blktype: Requested block type (GFS2_BLKST_DINODE or GFS2_BLKST_UNLINKED;
+ *           GFS2_BLKST_FREE do indicate not to verify)
 *
+ * If @type is DT_UNKNOWN, the inode type is fetched from disk.
+ *
+ * If @blktype is anything other than GFS2_BLKST_FREE (which is used as a
+ * placeholder because it doesn't otherwise make sense), the on-disk block type
+ * is verified to be @blktype.
+ *
  * Returns: A VFS inode, or an error
  */
 
 struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
-                u64 no_addr, u64 no_formal_ino)
+                u64 no_addr, u64 no_formal_ino,
+                unsigned int blktype)
 {
     struct inode *inode;
     struct gfs2_inode *ip;
     struct gfs2_glock *io_gl = NULL;
+    struct gfs2_holder i_gh;
     int error;
 
-    inode = iget_locked(sb, (unsigned long)no_addr);
+    gfs2_holder_mark_uninitialized(&i_gh);
+    inode = gfs2_iget(sb, no_addr);
     if (!inode)
         return ERR_PTR(-ENOMEM);
 
     ip = GFS2_I(inode);
-    ip->i_no_addr = no_addr;
 
     if (inode->i_state & I_NEW) {
         struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -112,10 +149,29 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
         if (unlikely(error))
             goto fail_put;
 
+        if (type == DT_UNKNOWN || blktype != GFS2_BLKST_FREE) {
+            /*
+             * The GL_SKIP flag indicates to skip reading the inode
+             * block. We read the inode with gfs2_inode_refresh
+             * after possibly checking the block type.
+             */
+            error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE,
+                           GL_SKIP, &i_gh);
+            if (error)
+                goto fail_put;
+
+            if (blktype != GFS2_BLKST_FREE) {
+                error = gfs2_check_blk_type(sdp, no_addr,
+                                blktype);
+                if (error)
+                    goto fail_put;
+            }
+        }
+
         set_bit(GIF_INVALID, &ip->i_flags);
         error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
         if (unlikely(error))
-            goto fail_iopen;
+            goto fail_put;
 
         ip->i_iopen_gh.gh_gl->gl_object = ip;
         gfs2_glock_put(io_gl);
@@ -134,6 +190,8 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
         unlock_new_inode(inode);
     }
 
+    if (gfs2_holder_initialized(&i_gh))
+        gfs2_glock_dq_uninit(&i_gh);
     return inode;
 
 fail_refresh:
@@ -141,10 +199,11 @@ fail_refresh:
     ip->i_iopen_gh.gh_gl->gl_object = NULL;
     gfs2_glock_dq_wait(&ip->i_iopen_gh);
     gfs2_holder_uninit(&ip->i_iopen_gh);
-fail_iopen:
+fail_put:
     if (io_gl)
         gfs2_glock_put(io_gl);
-fail_put:
+    if (gfs2_holder_initialized(&i_gh))
+        gfs2_glock_dq_uninit(&i_gh);
     ip->i_gl->gl_object = NULL;
 fail:
     iget_failed(inode);
@@ -155,23 +214,12 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
                   u64 *no_formal_ino, unsigned int blktype)
 {
     struct super_block *sb = sdp->sd_vfs;
-    struct gfs2_holder i_gh;
-    struct inode *inode = NULL;
+    struct inode *inode;
     int error;
 
-    /* Must not read in block until block type is verified */
-    error = gfs2_glock_nq_num(sdp, no_addr, &gfs2_inode_glops,
-                  LM_ST_EXCLUSIVE, GL_SKIP, &i_gh);
-    if (error)
-        return ERR_PTR(error);
-
-    error = gfs2_check_blk_type(sdp, no_addr, blktype);
-    if (error)
-        goto fail;
-
-    inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0);
+    inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0, blktype);
     if (IS_ERR(inode))
-        goto fail;
+        return inode;
 
     /* Two extra checks for NFS only */
     if (no_formal_ino) {
@@ -182,16 +230,12 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
         error = -EIO;
         if (GFS2_I(inode)->i_diskflags & GFS2_DIF_SYSTEM)
             goto fail_iput;
 
-        error = 0;
     }
     return inode;
 
-fail:
-    gfs2_glock_dq_uninit(&i_gh);
-    return error ? ERR_PTR(error) : inode;
 fail_iput:
     iput(inode);
-    goto fail;
+    return ERR_PTR(error);
 }
 
@@ -236,8 +280,8 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
     struct gfs2_holder d_gh;
     int error = 0;
     struct inode *inode = NULL;
-    int unlock = 0;
 
+    gfs2_holder_mark_uninitialized(&d_gh);
     if (!name->len || name->len > GFS2_FNAMESIZE)
         return ERR_PTR(-ENAMETOOLONG);
 
@@ -252,7 +296,6 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
         error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
         if (error)
             return ERR_PTR(error);
-        unlock = 1;
     }
 
     if (!is_root) {
@@ -265,7 +308,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
     if (IS_ERR(inode))
         error = PTR_ERR(inode);
 out:
-    if (unlock)
+    if (gfs2_holder_initialized(&d_gh))
         gfs2_glock_dq_uninit(&d_gh);
     if (error == -ENOENT)
         return NULL;
@@ -1309,7 +1352,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
     struct gfs2_inode *ip = GFS2_I(d_inode(odentry));
     struct gfs2_inode *nip = NULL;
     struct gfs2_sbd *sdp = GFS2_SB(odir);
-    struct gfs2_holder ghs[5], r_gh = { .gh_gl = NULL, };
+    struct gfs2_holder ghs[5], r_gh;
     struct gfs2_rgrpd *nrgd;
     unsigned int num_gh;
     int dir_rename = 0;
@@ -1317,6 +1360,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
     unsigned int x;
     int error;
 
+    gfs2_holder_mark_uninitialized(&r_gh);
     if (d_really_is_positive(ndentry)) {
         nip = GFS2_I(d_inode(ndentry));
         if (ip == nip)
@@ -1506,7 +1550,7 @@ out_gunlock:
         gfs2_holder_uninit(ghs + x);
     }
 out_gunlock_r:
-    if (r_gh.gh_gl)
+    if (gfs2_holder_initialized(&r_gh))
         gfs2_glock_dq_uninit(&r_gh);
 out:
     return error;
@@ -1532,13 +1576,14 @@ static int gfs2_exchange(struct inode *odir, struct dentry *odentry,
     struct gfs2_inode *oip = GFS2_I(odentry->d_inode);
     struct gfs2_inode *nip = GFS2_I(ndentry->d_inode);
     struct gfs2_sbd *sdp = GFS2_SB(odir);
-    struct gfs2_holder ghs[5], r_gh = { .gh_gl = NULL, };
+    struct gfs2_holder ghs[5], r_gh;
     unsigned int num_gh;
     unsigned int x;
     umode_t old_mode = oip->i_inode.i_mode;
     umode_t new_mode = nip->i_inode.i_mode;
     int error;
 
+    gfs2_holder_mark_uninitialized(&r_gh);
     error = gfs2_rindex_update(sdp);
     if (error)
         return error;
@@ -1646,7 +1691,7 @@ out_gunlock:
         gfs2_holder_uninit(ghs + x);
     }
 out_gunlock_r:
-    if (r_gh.gh_gl)
+    if (gfs2_holder_initialized(&r_gh))
         gfs2_glock_dq_uninit(&r_gh);
 out:
     return error;
@@ -1743,9 +1788,8 @@ int gfs2_permission(struct inode *inode, int mask)
     struct gfs2_inode *ip;
     struct gfs2_holder i_gh;
     int error;
-    int unlock = 0;
-
 
+    gfs2_holder_mark_uninitialized(&i_gh);
     ip = GFS2_I(inode);
     if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
         if (mask & MAY_NOT_BLOCK)
@@ -1753,14 +1797,13 @@ int gfs2_permission(struct inode *inode, int mask)
         error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
         if (error)
             return error;
-        unlock = 1;
     }
 
     if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode))
         error = -EACCES;
     else
         error = generic_permission(inode, mask);
-    if (unlock)
+    if (gfs2_holder_initialized(&i_gh))
         gfs2_glock_dq_uninit(&i_gh);
 
     return error;
@@ -1932,17 +1975,16 @@ static int gfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
     struct gfs2_inode *ip = GFS2_I(inode);
     struct gfs2_holder gh;
     int error;
-    int unlock = 0;
 
+    gfs2_holder_mark_uninitialized(&gh);
     if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
         error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
         if (error)
             return error;
-        unlock = 1;
     }
 
     generic_fillattr(inode, stat);
-    if (unlock)
+    if (gfs2_holder_initialized(&gh))
         gfs2_glock_dq_uninit(&gh);
 
     return 0;
@@ -94,11 +94,11 @@ err:
 }
 
 extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
-                       u64 no_addr, u64 no_formal_ino);
+                       u64 no_addr, u64 no_formal_ino,
+                       unsigned int blktype);
 extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
                      u64 *no_formal_ino,
                      unsigned int blktype);
-extern struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr);
 
 extern int gfs2_inode_refresh(struct gfs2_inode *ip);
 
@@ -535,9 +535,9 @@ static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
     if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
         return 0;
 
-    gfs2_replay_incr_blk(sdp, &start);
+    gfs2_replay_incr_blk(jd, &start);
 
-    for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
+    for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
         blkno = be64_to_cpu(*ptr++);
 
         jd->jd_found_blocks++;
@@ -693,7 +693,7 @@ static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
 
     offset = sizeof(struct gfs2_log_descriptor);
 
-    for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
+    for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
         error = gfs2_replay_read_block(jd, start, &bh);
         if (error)
             return error;
@@ -762,7 +762,6 @@ static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                       __be64 *ptr, int pass)
 {
     struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
-    struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
     struct gfs2_glock *gl = ip->i_gl;
     unsigned int blks = be32_to_cpu(ld->ld_data1);
     struct buffer_head *bh_log, *bh_ip;
@@ -773,8 +772,8 @@ static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
     if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
         return 0;
 
-    gfs2_replay_incr_blk(sdp, &start);
-    for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
+    gfs2_replay_incr_blk(jd, &start);
+    for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
         blkno = be64_to_cpu(*ptr++);
         esc = be64_to_cpu(*ptr++);
@@ -45,6 +45,7 @@ static void gfs2_init_inode_once(void *foo)
     memset(&ip->i_res, 0, sizeof(ip->i_res));
     RB_CLEAR_NODE(&ip->i_res.rs_node);
     ip->i_hash_cache = NULL;
+    gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
 }
 
 static void gfs2_init_glock_once(void *foo)
@@ -454,7 +454,8 @@ static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
     struct dentry *dentry;
     struct inode *inode;
 
-    inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0);
+    inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0,
+                  GFS2_BLKST_FREE /* ignore */);
     if (IS_ERR(inode)) {
         fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
         return PTR_ERR(inode);
@@ -883,7 +883,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
     gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                    &data_blocks, &ind_blocks);
 
-    ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
+    ghs = kmalloc(num_qd * sizeof(struct gfs2_holder), GFP_NOFS);
     if (!ghs)
         return -ENOMEM;
 
@@ -338,7 +338,7 @@ static int foreach_descriptor(struct gfs2_jdesc *jd, unsigned int start,
             struct gfs2_log_header_host lh;
             error = get_log_header(jd, start, &lh);
             if (!error) {
-                gfs2_replay_incr_blk(sdp, &start);
+                gfs2_replay_incr_blk(jd, &start);
                 brelse(bh);
                 continue;
             }
@@ -360,7 +360,7 @@ static int foreach_descriptor(struct gfs2_jdesc *jd, unsigned int start,
         }
 
         while (length--)
-            gfs2_replay_incr_blk(sdp, &start);
+            gfs2_replay_incr_blk(jd, &start);
 
         brelse(bh);
     }
@@ -390,7 +390,7 @@ static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *hea
     struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
 
     lblock = head->lh_blkno;
-    gfs2_replay_incr_blk(sdp, &lblock);
+    gfs2_replay_incr_blk(jd, &lblock);
     bh_map.b_size = 1 << ip->i_inode.i_blkbits;
     error = gfs2_block_map(&ip->i_inode, lblock, &bh_map, 0);
     if (error)
@@ -14,9 +14,9 @@
 
 extern struct workqueue_struct *gfs_recovery_wq;
 
-static inline void gfs2_replay_incr_blk(struct gfs2_sbd *sdp, unsigned int *blk)
+static inline void gfs2_replay_incr_blk(struct gfs2_jdesc *jd, unsigned int *blk)
 {
-    if (++*blk == sdp->sd_jdesc->jd_blocks)
+    if (++*blk == jd->jd_blocks)
         *blk = 0;
 }
 
@@ -658,6 +658,7 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
     if (rgd) {
         spin_lock(&rgd->rd_rsspin);
         __rs_deltree(rs);
+        BUG_ON(rs->rs_free);
         spin_unlock(&rgd->rd_rsspin);
     }
 }
@@ -671,10 +672,8 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
 void gfs2_rsqa_delete(struct gfs2_inode *ip, atomic_t *wcount)
 {
     down_write(&ip->i_rw_mutex);
-    if ((wcount == NULL) || (atomic_read(wcount) <= 1)) {
+    if ((wcount == NULL) || (atomic_read(wcount) <= 1))
         gfs2_rs_deltree(&ip->i_res);
-        BUG_ON(ip->i_res.rs_free);
-    }
     up_write(&ip->i_rw_mutex);
     gfs2_qa_delete(ip, wcount);
 }
@@ -722,6 +721,7 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
 
         gfs2_free_clones(rgd);
         kfree(rgd->rd_bits);
+        rgd->rd_bits = NULL;
         return_all_reservations(rgd);
         kmem_cache_free(gfs2_rgrpd_cachep, rgd);
     }
@@ -916,9 +916,6 @@ static int read_rindex_entry(struct gfs2_inode *ip)
     if (error)
         goto fail;
 
-    rgd->rd_gl->gl_object = rgd;
-    rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
-    rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
     rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
     rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
     if (rgd->rd_data > sdp->sd_max_rg_data)
@@ -926,14 +923,20 @@ static int read_rindex_entry(struct gfs2_inode *ip)
     spin_lock(&sdp->sd_rindex_spin);
     error = rgd_insert(rgd);
     spin_unlock(&sdp->sd_rindex_spin);
-    if (!error)
+    if (!error) {
+        rgd->rd_gl->gl_object = rgd;
+        rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
+        rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
+                            rgd->rd_length) * bsize) - 1;
         return 0;
+    }
 
     error = 0; /* someone else read in the rgrp; free it and ignore it */
     gfs2_glock_put(rgd->rd_gl);
 
 fail:
     kfree(rgd->rd_bits);
+    rgd->rd_bits = NULL;
     kmem_cache_free(gfs2_rgrpd_cachep, rgd);
     return error;
 }
@@ -2096,7 +2099,7 @@ void gfs2_inplace_release(struct gfs2_inode *ip)
 {
     struct gfs2_blkreserv *rs = &ip->i_res;
 
-    if (rs->rs_rgd_gh.gh_gl)
+    if (gfs2_holder_initialized(&rs->rs_rgd_gh))
         gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
 }
 
@@ -2596,7 +2599,7 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
 {
     unsigned int x;
 
-    rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
+    rlist->rl_ghs = kmalloc(rlist->rl_rgrps * sizeof(struct gfs2_holder),
                   GFP_NOFS | __GFP_NOFAIL);
     for (x = 0; x < rlist->rl_rgrps; x++)
         gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
@@ -855,7 +855,7 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
     wait_event(sdp->sd_reserving_log_wait, atomic_read(&sdp->sd_reserving_log) == 0);
     gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
 
-    if (freeze_gh.gh_gl)
+    if (gfs2_holder_initialized(&freeze_gh))
         gfs2_glock_dq_uninit(&freeze_gh);
 
     gfs2_quota_cleanup(sdp);
@@ -1033,7 +1033,7 @@ static int gfs2_unfreeze(struct super_block *sb)
 
     mutex_lock(&sdp->sd_freeze_mutex);
     if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
-        sdp->sd_freeze_gh.gh_gl == NULL) {
+        !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
         mutex_unlock(&sdp->sd_freeze_mutex);
         return 0;
     }
@@ -1084,9 +1084,11 @@ static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host
     int error = 0, err;
 
     memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
-    gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
+    gha = kmalloc(slots * sizeof(struct gfs2_holder), GFP_KERNEL);
     if (!gha)
         return -ENOMEM;
+    for (x = 0; x < slots; x++)
+        gfs2_holder_mark_uninitialized(gha + x);
 
     rgd_next = gfs2_rgrpd_get_first(sdp);
 
@@ -1096,7 +1098,7 @@ static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host
     for (x = 0; x < slots; x++) {
         gh = gha + x;
 
-        if (gh->gh_gl && gfs2_glock_poll(gh)) {
+        if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
             err = gfs2_glock_wait(gh);
             if (err) {
                 gfs2_holder_uninit(gh);
@@ -1109,7 +1111,7 @@ static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host
             }
         }
 
-        if (gh->gh_gl)
+        if (gfs2_holder_initialized(gh))
             done = 0;
         else if (rgd_next && !error) {
             error = gfs2_glock_nq_init(rgd_next->rd_gl,
@@ -1304,9 +1306,11 @@ static int gfs2_drop_inode(struct inode *inode)
 {
     struct gfs2_inode *ip = GFS2_I(inode);
 
-    if (!test_bit(GIF_FREE_VFS_INODE, &ip->i_flags) && inode->i_nlink) {
+    if (!test_bit(GIF_FREE_VFS_INODE, &ip->i_flags) &&
+        inode->i_nlink &&
+        gfs2_holder_initialized(&ip->i_iopen_gh)) {
         struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
-        if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags))
+        if (test_bit(GLF_DEMOTE, &gl->gl_flags))
             clear_nlink(inode);
     }
     return generic_drop_inode(inode);
@@ -1551,7 +1555,7 @@ static void gfs2_evict_inode(struct inode *inode)
         goto out_truncate;
     }
 
-    if (ip->i_iopen_gh.gh_gl &&
+    if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
         test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
         ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
         gfs2_glock_dq_wait(&ip->i_iopen_gh);
@@ -1610,7 +1614,7 @@ out_unlock:
     if (gfs2_rs_active(&ip->i_res))
         gfs2_rs_deltree(&ip->i_res);
 
-    if (ip->i_iopen_gh.gh_gl) {
+    if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
         if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
             ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
             gfs2_glock_dq_wait(&ip->i_iopen_gh);
@@ -1632,7 +1636,7 @@ out:
     gfs2_glock_add_to_lru(ip->i_gl);
     gfs2_glock_put(ip->i_gl);
     ip->i_gl = NULL;
-    if (ip->i_iopen_gh.gh_gl) {
+    if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
         ip->i_iopen_gh.gh_gl->gl_object = NULL;
         ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
         gfs2_glock_dq_wait(&ip->i_iopen_gh);
@@ -208,6 +208,9 @@ void block_invalidatepage(struct page *page, unsigned int offset,
           unsigned int length);
 int block_write_full_page(struct page *page, get_block_t *get_block,
             struct writeback_control *wbc);
+int __block_write_full_page(struct inode *inode, struct page *page,
+            get_block_t *get_block, struct writeback_control *wbc,
+            bh_end_io_t *handler);
 int block_read_full_page(struct page*, get_block_t*);
 int block_is_partially_uptodate(struct page *page, unsigned long from,
                 unsigned long count);