gfs2: Lock holder cleanup
Make the code more readable by cleaning up the different ways of initializing lock holders and checking for initialized lock holders: mark lock holders as uninitialized by setting the holder's glock to NULL (gfs2_holder_mark_uninitialized) instead of zeroing out the entire object or using a separate flag. Recognize initialized holders by their non-NULL glock (gfs2_holder_initialized). Don't zero out holder objects which are immediately initialized via gfs2_holder_init or gfs2_glock_nq_init.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
parent cda9dd4207
commit 6df9f9a253
@@ -117,7 +117,7 @@ static int gfs2_dentry_delete(const struct dentry *dentry)
 		return 0;
 
 	ginode = GFS2_I(d_inode(dentry));
-	if (!ginode->i_iopen_gh.gh_gl)
+	if (!gfs2_holder_initialized(&ginode->i_iopen_gh))
 		return 0;
 
 	if (test_bit(GLF_DEMOTE, &ginode->i_iopen_gh.gh_gl->gl_flags))

@@ -1098,7 +1098,7 @@ static void do_unflock(struct file *file, struct file_lock *fl)
 
 	mutex_lock(&fp->f_fl_mutex);
 	locks_lock_file_wait(file, fl);
-	if (fl_gh->gh_gl) {
+	if (gfs2_holder_initialized(fl_gh)) {
		gfs2_glock_dq(fl_gh);
 		gfs2_holder_uninit(fl_gh);
 	}

@@ -801,7 +801,7 @@ void gfs2_holder_uninit(struct gfs2_holder *gh)
 {
 	put_pid(gh->gh_owner_pid);
 	gfs2_glock_put(gh->gh_gl);
-	gh->gh_gl = NULL;
+	gfs2_holder_mark_uninitialized(gh);
 	gh->gh_ip = 0;
 }
 

@@ -247,4 +247,14 @@ extern void gfs2_unregister_debugfs(void);
 
 extern const struct lm_lockops gfs2_dlm_ops;
 
+static inline void gfs2_holder_mark_uninitialized(struct gfs2_holder *gh)
+{
+	gh->gh_gl = NULL;
+}
+
+static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
+{
+	return gh->gh_gl;
+}
+
 #endif /* __GLOCK_DOT_H__ */

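The hunk above adds the two inline helpers this commit is built around. As a minimal standalone sketch (plain user-space C, not kernel code: the struct, the lock/unlock stubs, and all holder_*/lock_* names below are simplified stand-ins for the real gfs2 types and functions), the pattern of replacing a separate "unlock" flag with the holder's own NULL/non-NULL glock pointer looks like this:

/*
 * Standalone sketch of the "NULL glock means uninitialized holder" idea.
 * Everything here is a simplified stand-in for the real gfs2 code.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct glock { int dummy; };              /* stand-in for struct gfs2_glock */

struct holder {                           /* stand-in for struct gfs2_holder */
	struct glock *gh_gl;
};

static inline void holder_mark_uninitialized(struct holder *gh)
{
	gh->gh_gl = NULL;                 /* mirrors gfs2_holder_mark_uninitialized() */
}

static inline bool holder_initialized(const struct holder *gh)
{
	return gh->gh_gl != NULL;         /* mirrors gfs2_holder_initialized() */
}

static struct glock some_glock;           /* dummy lock object */

static void lock_nq_init(struct glock *gl, struct holder *gh)
{
	gh->gh_gl = gl;                   /* stand-in for gfs2_glock_nq_init() */
}

static void lock_dq_uninit(struct holder *gh)
{
	holder_mark_uninitialized(gh);    /* stand-in for gfs2_glock_dq_uninit() */
}

int main(void)
{
	struct holder gh;
	bool need_lock = true;            /* e.g. "glock not already held by me" */

	/* Instead of tracking a separate "unlock" flag ... */
	holder_mark_uninitialized(&gh);
	if (need_lock)
		lock_nq_init(&some_glock, &gh);

	/* ... the cleanup path asks the holder itself whether it was taken. */
	if (holder_initialized(&gh))
		lock_dq_uninit(&gh);

	printf("holder initialized after cleanup: %d\n", holder_initialized(&gh));
	return 0;
}

The callers below follow exactly this shape: mark the holder uninitialized up front, acquire it conditionally, and test gfs2_holder_initialized() on the error/cleanup paths.
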
@@ -127,9 +127,9 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 	struct gfs2_inode *ip;
 	struct gfs2_glock *io_gl = NULL;
 	struct gfs2_holder i_gh;
-	bool unlock = false;
 	int error;
 
+	gfs2_holder_mark_uninitialized(&i_gh);
 	inode = gfs2_iget(sb, no_addr);
 	if (!inode)
 		return ERR_PTR(-ENOMEM);
@@ -159,7 +159,6 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 						   GL_SKIP, &i_gh);
 			if (error)
 				goto fail_put;
-			unlock = true;
 
 			if (blktype != GFS2_BLKST_FREE) {
 				error = gfs2_check_blk_type(sdp, no_addr,
@@ -191,7 +190,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 		unlock_new_inode(inode);
 	}
 
-	if (unlock)
+	if (gfs2_holder_initialized(&i_gh))
 		gfs2_glock_dq_uninit(&i_gh);
 	return inode;
 
@@ -203,7 +202,7 @@ fail_refresh:
 fail_put:
 	if (io_gl)
 		gfs2_glock_put(io_gl);
-	if (unlock)
+	if (gfs2_holder_initialized(&i_gh))
 		gfs2_glock_dq_uninit(&i_gh);
 	ip->i_gl->gl_object = NULL;
 fail:
@@ -281,8 +280,8 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
 	struct gfs2_holder d_gh;
 	int error = 0;
 	struct inode *inode = NULL;
-	int unlock = 0;
 
+	gfs2_holder_mark_uninitialized(&d_gh);
 	if (!name->len || name->len > GFS2_FNAMESIZE)
 		return ERR_PTR(-ENAMETOOLONG);
 
@@ -297,7 +296,6 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
 		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
 		if (error)
 			return ERR_PTR(error);
-		unlock = 1;
 	}
 
 	if (!is_root) {
@@ -310,7 +308,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
 	if (IS_ERR(inode))
 		error = PTR_ERR(inode);
 out:
-	if (unlock)
+	if (gfs2_holder_initialized(&d_gh))
 		gfs2_glock_dq_uninit(&d_gh);
 	if (error == -ENOENT)
 		return NULL;
@@ -1354,7 +1352,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
 	struct gfs2_inode *ip = GFS2_I(d_inode(odentry));
 	struct gfs2_inode *nip = NULL;
 	struct gfs2_sbd *sdp = GFS2_SB(odir);
-	struct gfs2_holder ghs[5], r_gh = { .gh_gl = NULL, };
+	struct gfs2_holder ghs[5], r_gh;
 	struct gfs2_rgrpd *nrgd;
 	unsigned int num_gh;
 	int dir_rename = 0;
@@ -1362,6 +1360,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
 	unsigned int x;
 	int error;
 
+	gfs2_holder_mark_uninitialized(&r_gh);
 	if (d_really_is_positive(ndentry)) {
 		nip = GFS2_I(d_inode(ndentry));
 		if (ip == nip)
@@ -1551,7 +1550,7 @@ out_gunlock:
 		gfs2_holder_uninit(ghs + x);
 	}
 out_gunlock_r:
-	if (r_gh.gh_gl)
+	if (gfs2_holder_initialized(&r_gh))
 		gfs2_glock_dq_uninit(&r_gh);
 out:
 	return error;
@@ -1577,13 +1576,14 @@ static int gfs2_exchange(struct inode *odir, struct dentry *odentry,
 	struct gfs2_inode *oip = GFS2_I(odentry->d_inode);
 	struct gfs2_inode *nip = GFS2_I(ndentry->d_inode);
 	struct gfs2_sbd *sdp = GFS2_SB(odir);
-	struct gfs2_holder ghs[5], r_gh = { .gh_gl = NULL, };
+	struct gfs2_holder ghs[5], r_gh;
 	unsigned int num_gh;
 	unsigned int x;
 	umode_t old_mode = oip->i_inode.i_mode;
 	umode_t new_mode = nip->i_inode.i_mode;
 	int error;
 
+	gfs2_holder_mark_uninitialized(&r_gh);
 	error = gfs2_rindex_update(sdp);
 	if (error)
 		return error;
@@ -1691,7 +1691,7 @@ out_gunlock:
 		gfs2_holder_uninit(ghs + x);
 	}
 out_gunlock_r:
-	if (r_gh.gh_gl)
+	if (gfs2_holder_initialized(&r_gh))
 		gfs2_glock_dq_uninit(&r_gh);
 out:
 	return error;
@@ -1788,9 +1788,8 @@ int gfs2_permission(struct inode *inode, int mask)
 	struct gfs2_inode *ip;
 	struct gfs2_holder i_gh;
 	int error;
-	int unlock = 0;
-
 
+	gfs2_holder_mark_uninitialized(&i_gh);
 	ip = GFS2_I(inode);
 	if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
 		if (mask & MAY_NOT_BLOCK)
@@ -1798,14 +1797,13 @@ int gfs2_permission(struct inode *inode, int mask)
 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
 		if (error)
 			return error;
-		unlock = 1;
 	}
 
 	if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode))
 		error = -EACCES;
 	else
 		error = generic_permission(inode, mask);
-	if (unlock)
+	if (gfs2_holder_initialized(&i_gh))
 		gfs2_glock_dq_uninit(&i_gh);
 
 	return error;
@@ -1977,17 +1975,16 @@ static int gfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_holder gh;
 	int error;
-	int unlock = 0;
 
+	gfs2_holder_mark_uninitialized(&gh);
 	if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
 		if (error)
 			return error;
-		unlock = 1;
 	}
 
 	generic_fillattr(inode, stat);
-	if (unlock)
+	if (gfs2_holder_initialized(&gh))
 		gfs2_glock_dq_uninit(&gh);
 
 	return 0;

@@ -45,7 +45,7 @@ static void gfs2_init_inode_once(void *foo)
 	memset(&ip->i_res, 0, sizeof(ip->i_res));
 	RB_CLEAR_NODE(&ip->i_res.rs_node);
 	ip->i_hash_cache = NULL;
-	ip->i_iopen_gh.gh_gl = NULL;
+	gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
 }
 
 static void gfs2_init_glock_once(void *foo)

@@ -883,7 +883,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
 			      &data_blocks, &ind_blocks);
 
-	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
+	ghs = kmalloc(num_qd * sizeof(struct gfs2_holder), GFP_NOFS);
 	if (!ghs)
 		return -ENOMEM;
 

@@ -2100,7 +2100,7 @@ void gfs2_inplace_release(struct gfs2_inode *ip)
 {
 	struct gfs2_blkreserv *rs = &ip->i_res;
 
-	if (rs->rs_rgd_gh.gh_gl)
+	if (gfs2_holder_initialized(&rs->rs_rgd_gh))
 		gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
 }
 
@@ -2600,7 +2600,7 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
 {
 	unsigned int x;
 
-	rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
+	rlist->rl_ghs = kmalloc(rlist->rl_rgrps * sizeof(struct gfs2_holder),
 				GFP_NOFS | __GFP_NOFAIL);
 	for (x = 0; x < rlist->rl_rgrps; x++)
 		gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,

@@ -855,7 +855,7 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
 	wait_event(sdp->sd_reserving_log_wait, atomic_read(&sdp->sd_reserving_log) == 0);
 	gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
 
-	if (freeze_gh.gh_gl)
+	if (gfs2_holder_initialized(&freeze_gh))
 		gfs2_glock_dq_uninit(&freeze_gh);
 
 	gfs2_quota_cleanup(sdp);
@@ -1033,7 +1033,7 @@ static int gfs2_unfreeze(struct super_block *sb)
 
 	mutex_lock(&sdp->sd_freeze_mutex);
         if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
-	    sdp->sd_freeze_gh.gh_gl == NULL) {
+	    !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
 		mutex_unlock(&sdp->sd_freeze_mutex);
                 return 0;
 	}
@@ -1084,9 +1084,11 @@ static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host
 	int error = 0, err;
 
 	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
-	gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
+	gha = kmalloc(slots * sizeof(struct gfs2_holder), GFP_KERNEL);
 	if (!gha)
 		return -ENOMEM;
+	for (x = 0; x < slots; x++)
+		gfs2_holder_mark_uninitialized(gha + x);
 
 	rgd_next = gfs2_rgrpd_get_first(sdp);
 
@@ -1096,7 +1098,7 @@ static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host
 		for (x = 0; x < slots; x++) {
 			gh = gha + x;
 
-			if (gh->gh_gl && gfs2_glock_poll(gh)) {
+			if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
 				err = gfs2_glock_wait(gh);
 				if (err) {
 					gfs2_holder_uninit(gh);
@@ -1109,7 +1111,7 @@ static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host
 				}
 			}
 
-			if (gh->gh_gl)
+			if (gfs2_holder_initialized(gh))
 				done = 0;
 			else if (rgd_next && !error) {
 				error = gfs2_glock_nq_init(rgd_next->rd_gl,
@@ -1304,9 +1306,11 @@ static int gfs2_drop_inode(struct inode *inode)
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
 
-	if (!test_bit(GIF_FREE_VFS_INODE, &ip->i_flags) && inode->i_nlink) {
+	if (!test_bit(GIF_FREE_VFS_INODE, &ip->i_flags) &&
+	    inode->i_nlink &&
+	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
 		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
-		if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags))
+		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
 			clear_nlink(inode);
 	}
 	return generic_drop_inode(inode);
@@ -1551,7 +1555,7 @@ static void gfs2_evict_inode(struct inode *inode)
 			goto out_truncate;
 	}
 
-	if (ip->i_iopen_gh.gh_gl &&
+	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
 	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
 		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
 		gfs2_glock_dq_wait(&ip->i_iopen_gh);
@@ -1610,7 +1614,7 @@ out_unlock:
 	if (gfs2_rs_active(&ip->i_res))
 		gfs2_rs_deltree(&ip->i_res);
 
-	if (ip->i_iopen_gh.gh_gl) {
+	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
 		if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
 			ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
 			gfs2_glock_dq_wait(&ip->i_iopen_gh);
@@ -1632,7 +1636,7 @@ out:
 	gfs2_glock_add_to_lru(ip->i_gl);
 	gfs2_glock_put(ip->i_gl);
 	ip->i_gl = NULL;
-	if (ip->i_iopen_gh.gh_gl) {
+	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
 		ip->i_iopen_gh.gh_gl->gl_object = NULL;
 		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
 		gfs2_glock_dq_wait(&ip->i_iopen_gh);