[GFS2] Remove GL_NEVER_RECURSE flag
There is no point in keeping this flag since recursion is no longer allowed for any glock.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
commit 579b78a43b (parent 5965b1f479)
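For illustration, a minimal before/after sketch of what the removal means at a call site; the glock and holder variables (gl, gh) are placeholders, the calls themselves are taken from the hunks below:

	/* Before this commit, a caller that wanted to forbid recursive
	 * acquisition passed GL_NEVER_RECURSE along with its other flags: */
	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_NEVER_RECURSE, &gh);

	/* After this commit, recursion is disallowed for every glock, so the
	 * flag bit (0x00002000 in glock.h) is gone and callers pass only the
	 * remaining flags, or 0 when none apply: */
	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, 0, &gh);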
@@ -357,7 +357,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                       struct gfs2_holder *gh)
 {
-       flags |= GL_NEVER_RECURSE;
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
@@ -387,7 +386,7 @@ void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
 void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
 {
        gh->gh_state = state;
-       gh->gh_flags = flags | GL_NEVER_RECURSE;
+       gh->gh_flags = flags;
        if (gh->gh_state == LM_ST_EXCLUSIVE)
                gh->gh_flags |= GL_LOCAL_EXCL;
@@ -731,8 +730,7 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state)
        } else {
                spin_unlock(&gl->gl_spin);

-               new_gh = gfs2_holder_get(gl, state,
-                                        LM_FLAG_TRY | GL_NEVER_RECURSE,
+               new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY,
                                         GFP_KERNEL | __GFP_NOFAIL),
                set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
                set_bit(HIF_DEALLOC, &new_gh->gh_iflags);
@@ -1336,7 +1334,7 @@ void gfs2_glock_force_drop(struct gfs2_glock *gl)
 {
        struct gfs2_holder gh;

-       gfs2_holder_init(gl, LM_ST_UNLOCKED, GL_NEVER_RECURSE, &gh);
+       gfs2_holder_init(gl, LM_ST_UNLOCKED, 0, &gh);
        set_bit(HIF_DEMOTE, &gh.gh_iflags);

        spin_lock(&gl->gl_spin);
@@ -1401,7 +1399,7 @@ int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
        }
        gh = &gr->gr_gh;

-       gfs2_holder_init(gl, 0, GL_NEVER_RECURSE, gh);
+       gfs2_holder_init(gl, 0, 0, gh);
        set_bit(HIF_GREEDY, &gh->gh_iflags);
        INIT_WORK(&gr->gr_work, greedy_work, gr);

@@ -26,7 +26,6 @@
 #define GL_NOCACHE             0x00000400
 #define GL_SYNC                0x00000800
 #define GL_NOCANCEL            0x00001000
-#define GL_NEVER_RECURSE       0x00002000
 #define GL_AOP                 0x00004000

 #define GLR_TRYFAILED          13

@@ -223,7 +223,7 @@ static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
        error = gfs2_glock_nq_num(sdp,
                                  GFS2_LIVE_LOCK, &gfs2_nondisk_glops,
                                  LM_ST_SHARED,
-                                 LM_FLAG_NOEXP | GL_EXACT | GL_NEVER_RECURSE,
+                                 LM_FLAG_NOEXP | GL_EXACT,
                                  &sdp->sd_live_gh);
        if (error) {
                fs_err(sdp, "can't acquire live glock: %d\n", error);
@@ -638,7 +638,7 @@ static int init_per_node(struct gfs2_sbd *sdp, int undo)

        ip = sdp->sd_ir_inode->u.generic_ip;
        error = gfs2_glock_nq_init(ip->i_gl,
-                                  LM_ST_EXCLUSIVE, GL_NEVER_RECURSE,
+                                  LM_ST_EXCLUSIVE, 0,
                                   &sdp->sd_ir_gh);
        if (error) {
                fs_err(sdp, "can't lock local \"ir\" file: %d\n", error);
@@ -647,7 +647,7 @@ static int init_per_node(struct gfs2_sbd *sdp, int undo)

        ip = sdp->sd_sc_inode->u.generic_ip;
        error = gfs2_glock_nq_init(ip->i_gl,
-                                  LM_ST_EXCLUSIVE, GL_NEVER_RECURSE,
+                                  LM_ST_EXCLUSIVE, 0,
                                   &sdp->sd_sc_gh);
        if (error) {
                fs_err(sdp, "can't lock local \"sc\" file: %d\n", error);
@@ -656,7 +656,7 @@ static int init_per_node(struct gfs2_sbd *sdp, int undo)

        ip = sdp->sd_ut_inode->u.generic_ip;
        error = gfs2_glock_nq_init(ip->i_gl,
-                                  LM_ST_EXCLUSIVE, GL_NEVER_RECURSE,
+                                  LM_ST_EXCLUSIVE, 0,
                                   &sdp->sd_ut_gh);
        if (error) {
                fs_err(sdp, "can't lock local \"ut\" file: %d\n", error);
@@ -665,7 +665,7 @@ static int init_per_node(struct gfs2_sbd *sdp, int undo)

        ip = sdp->sd_qc_inode->u.generic_ip;
        error = gfs2_glock_nq_init(ip->i_gl,
-                                  LM_ST_EXCLUSIVE, GL_NEVER_RECURSE,
+                                  LM_ST_EXCLUSIVE, 0,
                                   &sdp->sd_qc_gh);
        if (error) {
                fs_err(sdp, "can't lock local \"qc\" file: %d\n", error);

@@ -487,8 +487,7 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd)
        error = gfs2_glock_nq_init(sdp->sd_trans_gl,
                                   LM_ST_SHARED,
                                   LM_FLAG_NOEXP | LM_FLAG_PRIORITY |
-                                  GL_NEVER_RECURSE | GL_NOCANCEL |
-                                  GL_NOCACHE,
+                                  GL_NOCANCEL | GL_NOCACHE,
                                   &t_gh);
        if (error)
                goto fail_gunlock_ji;

@@ -469,7 +469,7 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
        int error;

        error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED,
-                                  GL_LOCAL_EXCL | GL_NEVER_RECURSE, &t_gh);
+                                  GL_LOCAL_EXCL, &t_gh);
        if (error)
                return error;

@@ -530,7 +530,7 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
        gfs2_statfs_sync(sdp);

        error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED,
-                                  GL_LOCAL_EXCL | GL_NEVER_RECURSE | GL_NOCACHE,
+                                  GL_LOCAL_EXCL | GL_NOCACHE,
                                   &t_gh);
        if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
                return error;
@@ -869,7 +869,7 @@ int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp, struct gfs2_holder *t_gh)
        }

        error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_DEFERRED,
-                                  LM_FLAG_PRIORITY | GL_NEVER_RECURSE | GL_NOCACHE,
+                                  LM_FLAG_PRIORITY | GL_NOCACHE,
                                   t_gh);

        list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {

@@ -50,8 +50,7 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
                                            sizeof(uint64_t));
        INIT_LIST_HEAD(&tr->tr_list_buf);

-       gfs2_holder_init(sdp->sd_trans_gl, LM_ST_SHARED,
-                        GL_NEVER_RECURSE, &tr->tr_t_gh);
+       gfs2_holder_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &tr->tr_t_gh);

        error = gfs2_glock_nq(&tr->tr_t_gh);
        if (error)