diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 4701c4aafbf4..8c4c1f871a88 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -35,6 +35,7 @@
 #include <linux/rhashtable.h>
 #include <linux/pid_namespace.h>
 #include <linux/file.h>
+#include <linux/random.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -562,11 +563,11 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
 	gl->gl_tchange = jiffies;
 }
 
-static void gfs2_set_demote(struct gfs2_glock *gl)
+static void gfs2_set_demote(int nr, struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 
-	set_bit(GLF_DEMOTE, &gl->gl_flags);
+	set_bit(nr, &gl->gl_flags);
 	smp_mb();
 	wake_up(&sdp->sd_async_glock_wait);
 }
@@ -958,20 +959,22 @@ static void gfs2_glock_poke(struct gfs2_glock *gl)
 	gfs2_holder_uninit(&gh);
 }
 
-static bool gfs2_try_evict(struct gfs2_glock *gl)
+static void gfs2_try_evict(struct gfs2_glock *gl)
 {
 	struct gfs2_inode *ip;
-	bool evicted = false;
 
 	/*
 	 * If there is contention on the iopen glock and we have an inode, try
-	 * to grab and release the inode so that it can be evicted.  This will
-	 * allow the remote node to go ahead and delete the inode without us
-	 * having to do it, which will avoid rgrp glock thrashing.
+	 * to grab and release the inode so that it can be evicted.  The
+	 * GIF_DEFER_DELETE flag indicates to gfs2_evict_inode() that the inode
+	 * should not be deleted locally.  This will allow the remote node to
+	 * go ahead and delete the inode without us having to do it, which will
+	 * avoid rgrp glock thrashing.
 	 *
 	 * The remote node is likely still holding the corresponding inode
 	 * glock, so it will run before we get to verify that the delete has
-	 * happened below.
+	 * happened below.  (Verification is triggered by the call to
+	 * gfs2_queue_verify_delete() in gfs2_evict_inode().)
 	 */
 	spin_lock(&gl->gl_lockref.lock);
 	ip = gl->gl_object;
@@ -979,8 +982,14 @@ static bool gfs2_try_evict(struct gfs2_glock *gl)
 		ip = NULL;
 	spin_unlock(&gl->gl_lockref.lock);
 	if (ip) {
-		gl->gl_no_formal_ino = ip->i_no_formal_ino;
-		set_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
+		wait_on_inode(&ip->i_inode);
+		if (is_bad_inode(&ip->i_inode)) {
+			iput(&ip->i_inode);
+			ip = NULL;
+		}
+	}
+	if (ip) {
+		set_bit(GIF_DEFER_DELETE, &ip->i_flags);
 		d_prune_aliases(&ip->i_inode);
 		iput(&ip->i_inode);
@@ -988,7 +997,7 @@
 		spin_lock(&gl->gl_lockref.lock);
 		ip = gl->gl_object;
 		if (ip) {
-			clear_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
+			clear_bit(GIF_DEFER_DELETE, &ip->i_flags);
 			if (!igrab(&ip->i_inode))
 				ip = NULL;
 		}
@@ -997,9 +1006,7 @@
 		gfs2_glock_poke(ip->i_gl);
 		iput(&ip->i_inode);
 	}
-	evicted = !ip;
-	return evicted;
 }
 
 bool gfs2_queue_try_to_evict(struct gfs2_glock *gl)
 {
@@ -1008,18 +1015,18 @@ bool gfs2_queue_try_to_evict(struct gfs2_glock *gl)
 
 	if (test_and_set_bit(GLF_TRY_TO_EVICT, &gl->gl_flags))
 		return false;
-	return queue_delayed_work(sdp->sd_delete_wq,
-				  &gl->gl_delete, 0);
+	return !mod_delayed_work(sdp->sd_delete_wq, &gl->gl_delete, 0);
 }
 
-static bool gfs2_queue_verify_evict(struct gfs2_glock *gl)
+bool gfs2_queue_verify_delete(struct gfs2_glock *gl, bool later)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+	unsigned long delay;
 
-	if (test_and_set_bit(GLF_VERIFY_EVICT, &gl->gl_flags))
+	if (test_and_set_bit(GLF_VERIFY_DELETE, &gl->gl_flags))
 		return false;
-	return queue_delayed_work(sdp->sd_delete_wq,
-				  &gl->gl_delete, 5 * HZ);
+	delay = later ? HZ + get_random_long() % (HZ * 9) : 0;
+	return queue_delayed_work(sdp->sd_delete_wq, &gl->gl_delete, delay);
 }
 
 static void delete_work_func(struct work_struct *work)
@@ -1027,43 +1034,21 @@
 	struct delayed_work *dwork = to_delayed_work(work);
 	struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete);
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-	struct inode *inode;
-	u64 no_addr = gl->gl_name.ln_number;
+	bool verify_delete = test_and_clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags);
 
-	if (test_and_clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags)) {
-		/*
-		 * If we can evict the inode, give the remote node trying to
-		 * delete the inode some time before verifying that the delete
-		 * has happened.  Otherwise, if we cause contention on the inode glock
-		 * immediately, the remote node will think that we still have
-		 * the inode in use, and so it will give up waiting.
-		 *
-		 * If we can't evict the inode, signal to the remote node that
-		 * the inode is still in use.  We'll later try to delete the
-		 * inode locally in gfs2_evict_inode.
-		 *
-		 * FIXME: We only need to verify that the remote node has
-		 * deleted the inode because nodes before this remote delete
-		 * rework won't cooperate.  At a later time, when we no longer
-		 * care about compatibility with such nodes, we can skip this
-		 * step entirely.
-		 */
-		if (gfs2_try_evict(gl)) {
-			if (test_bit(SDF_KILL, &sdp->sd_flags))
-				goto out;
-			if (gfs2_queue_verify_evict(gl))
-				return;
-		}
-		goto out;
-	}
+	if (test_and_clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags))
+		gfs2_try_evict(gl);
+
+	if (verify_delete) {
+		u64 no_addr = gl->gl_name.ln_number;
+		struct inode *inode;
 
-	if (test_and_clear_bit(GLF_VERIFY_EVICT, &gl->gl_flags)) {
 		inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
 					    GFS2_BLKST_UNLINKED);
 		if (IS_ERR(inode)) {
 			if (PTR_ERR(inode) == -EAGAIN &&
 			    !test_bit(SDF_KILL, &sdp->sd_flags) &&
-			    gfs2_queue_verify_evict(gl))
+			    gfs2_queue_verify_delete(gl, true))
 				return;
 		} else {
 			d_prune_aliases(inode);
 			iput(inode);
@@ -1071,7 +1056,6 @@
 		}
 	}
 
-out:
 	gfs2_glock_put(gl);
 }
 
@@ -1100,7 +1084,7 @@ static void glock_work_func(struct work_struct *work)
 
 			if (!delay) {
 				clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
-				gfs2_set_demote(gl);
+				gfs2_set_demote(GLF_DEMOTE, gl);
 			}
 		}
 		run_queue(gl, 0);
@@ -1442,10 +1426,7 @@ out:
 static void request_demote(struct gfs2_glock *gl, unsigned int state,
 			   unsigned long delay, bool remote)
 {
-	if (delay)
-		set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
-	else
-		gfs2_set_demote(gl);
+	gfs2_set_demote(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, gl);
 	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
 		gl->gl_demote_state = state;
 		gl->gl_demote_time = jiffies;
@@ -1635,12 +1616,6 @@ int gfs2_glock_poll(struct gfs2_holder *gh)
 	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
 }
 
-static inline bool needs_demote(struct gfs2_glock *gl)
-{
-	return (test_bit(GLF_DEMOTE, &gl->gl_flags) ||
-		test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags));
-}
-
 static void __gfs2_glock_dq(struct gfs2_holder *gh)
 {
 	struct gfs2_glock *gl = gh->gh_gl;
@@ -1649,8 +1624,8 @@
 
 	/*
 	 * This holder should not be cached, so mark it for demote.
-	 * Note: this should be done before the check for needs_demote
-	 * below.
+	 * Note: this should be done before the glock_needs_demote
+	 * check below.
 	 */
 	if (gh->gh_flags & GL_NOCACHE)
 		request_demote(gl, LM_ST_UNLOCKED, 0, false);
@@ -1663,7 +1638,7 @@
 	 * If there hasn't been a demote request we are done.
 	 * (Let the remaining holders, if any, keep holding it.)
 	 */
-	if (!needs_demote(gl)) {
+	if (!glock_needs_demote(gl)) {
 		if (list_empty(&gl->gl_holders))
 			fast_path = 1;
 	}
@@ -2117,7 +2092,7 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
 void gfs2_cancel_delete_work(struct gfs2_glock *gl)
 {
 	clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags);
-	clear_bit(GLF_VERIFY_EVICT, &gl->gl_flags);
+	clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags);
 	if (cancel_delayed_work(&gl->gl_delete))
 		gfs2_glock_put(gl);
 }
@@ -2370,7 +2345,7 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
 		*p++ = 'N';
 	if (test_bit(GLF_TRY_TO_EVICT, gflags))
 		*p++ = 'e';
-	if (test_bit(GLF_VERIFY_EVICT, gflags))
+	if (test_bit(GLF_VERIFY_DELETE, gflags))
 		*p++ = 'E';
 	*p = 0;
 	return buf;
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index adf0091cc98f..c171f745650f 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -245,6 +245,7 @@ static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
 void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
 bool gfs2_queue_try_to_evict(struct gfs2_glock *gl);
+bool gfs2_queue_verify_delete(struct gfs2_glock *gl, bool later);
 void gfs2_cancel_delete_work(struct gfs2_glock *gl);
 void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
@@ -284,4 +285,10 @@ static inline bool gfs2_holder_queued(struct gfs2_holder *gh)
 void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
 bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);
 
+static inline bool glock_needs_demote(struct gfs2_glock *gl)
+{
+	return (test_bit(GLF_DEMOTE, &gl->gl_flags) ||
+		test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags));
+}
+
 #endif /* __GLOCK_DOT_H__ */
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 95d8081681dc..eb4714f299ef 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -470,7 +470,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
 *
 * Returns: errno
 */
-int gfs2_inode_refresh(struct gfs2_inode *ip)
+static int gfs2_inode_refresh(struct gfs2_inode *ip)
 {
 	struct buffer_head *dibh;
 	int error;
@@ -494,11 +494,18 @@
 static int inode_go_instantiate(struct gfs2_glock *gl)
 {
 	struct gfs2_inode *ip = gl->gl_object;
+	struct gfs2_glock *io_gl;
+	int error;
 
 	if (!ip) /* no inode to populate - read it in later */
 		return 0;
 
-	return gfs2_inode_refresh(ip);
+	error = gfs2_inode_refresh(ip);
+	if (error)
+		return error;
+	io_gl = ip->i_iopen_gh.gh_gl;
+	io_gl->gl_no_formal_ino = ip->i_no_formal_ino;
+	return 0;
 }
 
 static int inode_go_held(struct gfs2_holder *gh)
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index aa4ef67a34e0..4e19cce3d906 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -329,7 +329,7 @@
 	GLF_BLOCKING			= 15,
 	GLF_UNLOCKED			= 16, /* Wait for glock to be unlocked */
 	GLF_TRY_TO_EVICT		= 17, /* iopen glocks only */
-	GLF_VERIFY_EVICT		= 18, /* iopen glocks only */
+	GLF_VERIFY_DELETE		= 18, /* iopen glocks only */
 };
 
 struct gfs2_glock {
@@ -376,7 +376,7 @@
 	GIF_SW_PAGED		= 3,
 	GIF_FREE_VFS_INODE	= 5,
 	GIF_GLOP_PENDING	= 6,
-	GIF_DEFERRED_DELETE	= 7,
+	GIF_DEFER_DELETE	= 7,
 };
 
 struct gfs2_inode {
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 1b95db2c3aac..6fbbaaad1cd0 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -750,6 +750,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
 	if (error)
 		goto fail_free_inode;
 	gfs2_cancel_delete_work(io_gl);
+	io_gl->gl_no_formal_ino = ip->i_no_formal_ino;
 
 retry:
 	error = insert_inode_locked4(inode, ip->i_no_addr, iget_test, &ip->i_no_addr);
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index fd15d1c6b6fb..9e5e1622d50a 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -93,8 +93,6 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
 				  u64 no_formal_ino,
 				  unsigned int blktype);
 
-int gfs2_inode_refresh(struct gfs2_inode *ip);
-
 struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
 			   int is_root);
 int gfs2_permission(struct mnt_idmap *idmap,
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index fa5134df985f..58aeeae7ed8c 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -224,8 +224,21 @@ static int make_mode(struct gfs2_sbd *sdp, const unsigned int lmstate)
 	return -1;
 }
 
+/* Taken from fs/dlm/lock.c. */
+
+static bool middle_conversion(int cur, int req)
+{
+	return (cur == DLM_LOCK_PR && req == DLM_LOCK_CW) ||
+	       (cur == DLM_LOCK_CW && req == DLM_LOCK_PR);
+}
+
+static bool down_conversion(int cur, int req)
+{
+	return !middle_conversion(cur, req) && req < cur;
+}
+
 static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
-		      const int req)
+		      const int cur, const int req)
 {
 	u32 lkf = 0;
 
@@ -251,7 +264,14 @@
 
 	if (!test_bit(GLF_INITIAL, &gl->gl_flags)) {
 		lkf |= DLM_LKF_CONVERT;
-		if (test_bit(GLF_BLOCKING, &gl->gl_flags))
+
+		/*
+		 * The DLM_LKF_QUECVT flag needs to be set for "first come,
+		 * first served" semantics, but it must only be set for
+		 * "upward" lock conversions or else DLM will reject the
+		 * request as invalid.
+		 */
+		if (!down_conversion(cur, req))
 			lkf |= DLM_LKF_QUECVT;
 	}
 
@@ -271,13 +291,14 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
 		     unsigned int flags)
 {
 	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
-	int req;
+	int cur, req;
 	u32 lkf;
 	char strname[GDLM_STRNAME_BYTES] = "";
 	int error;
 
+	cur = make_mode(gl->gl_name.ln_sbd, gl->gl_state);
 	req = make_mode(gl->gl_name.ln_sbd, req_state);
-	lkf = make_flags(gl, flags, req);
+	lkf = make_flags(gl, flags, cur, req);
 	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
 	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
 	if (test_bit(GLF_INITIAL, &gl->gl_flags)) {
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 29c772816765..b14e54b38ee8 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1879,7 +1879,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
 		 */
 		ip = gl->gl_object;
 
-		if (ip || !gfs2_queue_try_to_evict(gl))
+		if (ip || !gfs2_queue_verify_delete(gl, false))
 			gfs2_glock_put(gl);
 		else
 			found++;
@@ -1987,10 +1987,8 @@ static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
 static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
 {
 	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-	u32 skip;
 
-	get_random_bytes(&skip, sizeof(skip));
-	return skip % sdp->sd_rgrps;
+	return get_random_u32() % sdp->sd_rgrps;
 }
 
 static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 6678060ed4d2..92a3b6ddafdc 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -44,10 +44,10 @@
 #include "xattr.h"
 #include "lops.h"
 
-enum dinode_demise {
-	SHOULD_DELETE_DINODE,
-	SHOULD_NOT_DELETE_DINODE,
-	SHOULD_DEFER_EVICTION,
+enum evict_behavior {
+	EVICT_SHOULD_DELETE,
+	EVICT_SHOULD_SKIP_DELETE,
+	EVICT_SHOULD_DEFER_DELETE,
 };
 
 /**
@@ -1030,7 +1030,7 @@ static int gfs2_drop_inode(struct inode *inode)
 	if (inode->i_nlink &&
 	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
 		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
-		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
+		if (glock_needs_demote(gl))
 			clear_nlink(inode);
 	}
 
@@ -1045,7 +1045,7 @@ static int gfs2_drop_inode(struct inode *inode)
 		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
 
 		gfs2_glock_hold(gl);
-		if (!gfs2_queue_try_to_evict(gl))
+		if (!gfs2_queue_verify_delete(gl, true))
 			gfs2_glock_put_async(gl);
 		return 0;
 	}
@@ -1257,7 +1257,7 @@ static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
 	gfs2_glock_put(gl);
 }
 
-static bool gfs2_upgrade_iopen_glock(struct inode *inode)
+static enum evict_behavior gfs2_upgrade_iopen_glock(struct inode *inode)
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -1272,9 +1272,9 @@
 	 * exclusive access to the iopen glock here.
 	 *
 	 * Otherwise, the other nodes holding the lock will be notified about
-	 * our locking request.  If they do not have the inode open, they are
-	 * expected to evict the cached inode and release the lock, allowing us
-	 * to proceed.
+	 * our locking request (see iopen_go_callback()).  If they do not have
+	 * the inode open, they are expected to evict the cached inode and
+	 * release the lock, allowing us to proceed.
 	 *
 	 * Otherwise, if they cannot evict the inode, they are expected to poke
 	 * the inode glock (note: not the iopen glock).  We will notice that
@@ -1290,17 +1292,22 @@
 	gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
 	error = gfs2_glock_nq(gh);
 	if (error)
-		return false;
+		return EVICT_SHOULD_SKIP_DELETE;
 
 	wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
 		!test_bit(HIF_WAIT, &gh->gh_iflags) ||
-		test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
+		glock_needs_demote(ip->i_gl),
 		5 * HZ);
 	if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
 		gfs2_glock_dq(gh);
-		return false;
+		if (glock_needs_demote(ip->i_gl))
+			return EVICT_SHOULD_SKIP_DELETE;
+		return EVICT_SHOULD_DEFER_DELETE;
 	}
-	return gfs2_glock_holder_ready(gh) == 0;
+	error = gfs2_glock_holder_ready(gh);
+	if (error)
+		return EVICT_SHOULD_SKIP_DELETE;
+	return EVICT_SHOULD_DELETE;
 }
 
 /**
@@ -1313,8 +1318,8 @@
 *
 * Returns: the fate of the dinode
 */
-static enum dinode_demise evict_should_delete(struct inode *inode,
-					      struct gfs2_holder *gh)
+static enum evict_behavior evict_should_delete(struct inode *inode,
+					       struct gfs2_holder *gh)
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct super_block *sb = inode->i_sb;
@@ -1324,12 +1329,12 @@
 	if (unlikely(test_bit(GIF_ALLOC_FAILED, &ip->i_flags)))
 		goto should_delete;
 
-	if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags))
-		return SHOULD_DEFER_EVICTION;
+	if (test_bit(GIF_DEFER_DELETE, &ip->i_flags))
+		return EVICT_SHOULD_DEFER_DELETE;
 
 	/* Deletes should never happen under memory pressure anymore. */
 	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
-		return SHOULD_DEFER_EVICTION;
+		return EVICT_SHOULD_DEFER_DELETE;
 
 	/* Must not read inode block until block type has been verified */
 	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
@@ -1337,34 +1342,37 @@
 	if (ret) {
 		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
 		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
 		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
-		return SHOULD_DEFER_EVICTION;
+		return EVICT_SHOULD_DEFER_DELETE;
 	}
 
 	if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
-		return SHOULD_NOT_DELETE_DINODE;
+		return EVICT_SHOULD_SKIP_DELETE;
 
 	ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
 	if (ret)
-		return SHOULD_NOT_DELETE_DINODE;
+		return EVICT_SHOULD_SKIP_DELETE;
 
 	ret = gfs2_instantiate(gh);
 	if (ret)
-		return SHOULD_NOT_DELETE_DINODE;
+		return EVICT_SHOULD_SKIP_DELETE;
 
 	/*
 	 * The inode may have been recreated in the meantime.
 	 */
 	if (inode->i_nlink)
-		return SHOULD_NOT_DELETE_DINODE;
+		return EVICT_SHOULD_SKIP_DELETE;
 
 should_delete:
 	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
 	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
-		if (!gfs2_upgrade_iopen_glock(inode)) {
+		enum evict_behavior behavior =
+			gfs2_upgrade_iopen_glock(inode);
+
+		if (behavior != EVICT_SHOULD_DELETE) {
 			gfs2_holder_uninit(&ip->i_iopen_gh);
-			return SHOULD_NOT_DELETE_DINODE;
+			return behavior;
 		}
 	}
-	return SHOULD_DELETE_DINODE;
+	return EVICT_SHOULD_DELETE;
 }
 
 /**
@@ -1475,8 +1483,10 @@ static void gfs2_evict_inode(struct inode *inode)
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_holder gh;
+	enum evict_behavior behavior;
 	int ret;
 
+	gfs2_holder_mark_uninitialized(&gh);
 	if (inode->i_nlink || sb_rdonly(sb) || !ip->i_no_addr)
 		goto out;
 
@@ -1488,11 +1498,20 @@
 	if (!sdp->sd_jdesc)
 		goto out;
 
-	gfs2_holder_mark_uninitialized(&gh);
-	ret = evict_should_delete(inode, &gh);
-	if (ret == SHOULD_DEFER_EVICTION)
-		goto out;
-	if (ret == SHOULD_DELETE_DINODE)
+	behavior = evict_should_delete(inode, &gh);
+	if (behavior == EVICT_SHOULD_DEFER_DELETE &&
+	    !test_bit(SDF_KILL, &sdp->sd_flags)) {
+		struct gfs2_glock *io_gl = ip->i_iopen_gh.gh_gl;
+
+		if (io_gl) {
+			gfs2_glock_hold(io_gl);
+			if (!gfs2_queue_verify_delete(io_gl, true))
+				gfs2_glock_put(io_gl);
+			goto out;
+		}
+		behavior = EVICT_SHOULD_DELETE;
+	}
+	if (behavior == EVICT_SHOULD_DELETE)
 		ret = evict_unlinked_inode(inode);
 	else
 		ret = evict_linked_inode(inode);
@@ -1500,11 +1519,11 @@
 	if (gfs2_rs_active(&ip->i_res))
 		gfs2_rs_deltree(&ip->i_res);
 
-	if (gfs2_holder_initialized(&gh))
-		gfs2_glock_dq_uninit(&gh);
 	if (ret && ret != GLR_TRYFAILED && ret != -EROFS)
 		fs_warn(sdp, "gfs2_evict_inode: %d\n", ret);
 out:
+	if (gfs2_holder_initialized(&gh))
+		gfs2_glock_dq_uninit(&gh);
 	truncate_inode_pages_final(&inode->i_data);
 	if (ip->i_qadata)
 		gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
@@ -1537,11 +1556,13 @@ static struct inode *gfs2_alloc_inode(struct super_block *sb)
 	if (!ip)
 		return NULL;
 	ip->i_no_addr = 0;
+	ip->i_no_formal_ino = 0;
 	ip->i_flags = 0;
 	ip->i_gl = NULL;
 	gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
 	memset(&ip->i_res, 0, sizeof(ip->i_res));
 	RB_CLEAR_NODE(&ip->i_res.rs_node);
+	ip->i_diskflags = 0;
 	ip->i_rahead = 0;
 	return &ip->i_inode;
 }
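
Aside (not part of the patch): the make_flags() change above sets DLM_LKF_QUECVT for every conversion that is not a pure down-conversion. The standalone userspace sketch below mirrors the two predicates from the lock_dlm.c hunk and prints which conversions keep the flag; the mode numbering (NL=0, CR=1, CW=2, PR=3, PW=4, EX=5) is assumed to match include/uapi/linux/dlmconstants.h.

/* quecvt_demo.c - illustrative sketch only; mirrors middle_conversion()
 * and down_conversion() from the lock_dlm.c hunk above.
 * Build with: cc -o quecvt_demo quecvt_demo.c
 */
#include <stdbool.h>
#include <stdio.h>

/* Assumed DLM lock modes, as in include/uapi/linux/dlmconstants.h. */
enum { DLM_LOCK_NL, DLM_LOCK_CR, DLM_LOCK_CW, DLM_LOCK_PR, DLM_LOCK_PW,
       DLM_LOCK_EX };

static const char *mode_name[] = { "NL", "CR", "CW", "PR", "PW", "EX" };

/* PR <-> CW are "middle" conversions: neither mode's rights contain the
 * other's, so they do not count as down-conversions. */
static bool middle_conversion(int cur, int req)
{
	return (cur == DLM_LOCK_PR && req == DLM_LOCK_CW) ||
	       (cur == DLM_LOCK_CW && req == DLM_LOCK_PR);
}

/* Only strictly downward, non-middle conversions may omit DLM_LKF_QUECVT. */
static bool down_conversion(int cur, int req)
{
	return !middle_conversion(cur, req) && req < cur;
}

int main(void)
{
	for (int cur = DLM_LOCK_NL; cur <= DLM_LOCK_EX; cur++) {
		for (int req = DLM_LOCK_NL; req <= DLM_LOCK_EX; req++) {
			if (cur == req)
				continue;
			printf("%s -> %s: %s\n", mode_name[cur], mode_name[req],
			       down_conversion(cur, req) ?
			       "-" : "DLM_LKF_QUECVT");
		}
	}
	return 0;
}

For example, EX -> PR prints "-" (a pure down-conversion, for which DLM rejects QUECVT), while PR -> EX and the PR <-> CW middle conversions keep DLM_LKF_QUECVT, preserving the "first come, first served" queueing described in the comment.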