From 14d37564fa3dc4e5d4c6828afcd26ac14e6796c5 Mon Sep 17 00:00:00 2001
From: Dan Carpenter
Date: Wed, 14 Dec 2016 08:02:03 -0600
Subject: [PATCH 1/8] GFS2: Fix reference to ERR_PTR in gfs2_glock_iter_next

This patch fixes a place where function gfs2_glock_iter_next can
reference an invalid error pointer.

Signed-off-by: Dan Carpenter
Signed-off-by: Bob Peterson
---
 fs/gfs2/glock.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 14cbf60167a7..2928f1209b67 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1802,16 +1802,18 @@ void gfs2_glock_exit(void)

 static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
 {
-	do {
-		gi->gl = rhashtable_walk_next(&gi->hti);
+	while ((gi->gl = rhashtable_walk_next(&gi->hti))) {
 		if (IS_ERR(gi->gl)) {
 			if (PTR_ERR(gi->gl) == -EAGAIN)
 				continue;
 			gi->gl = NULL;
+			return;
 		}
-	/* Skip entries for other sb and dead entries */
-	} while ((gi->gl) && ((gi->sdp != gi->gl->gl_name.ln_sbd) ||
-			      __lockref_is_dead(&gi->gl->gl_lockref)));
+		/* Skip entries for other sb and dead entries */
+		if (gi->sdp == gi->gl->gl_name.ln_sbd &&
+		    !__lockref_is_dead(&gi->gl->gl_lockref))
+			return;
+	}
 }

 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)

From 2fcf5cc3be06126f9aa2430ca6d739c8b3c5aaf5 Mon Sep 17 00:00:00 2001
From: Bob Peterson
Date: Fri, 16 Dec 2016 08:01:28 -0600
Subject: [PATCH 2/8] GFS2: Limit number of transaction blocks requested for truncates

This patch limits the number of transaction blocks requested during
file truncates. If we have very large multi-terabyte files, and want
to delete or truncate them, they might span so many resource groups
that we overflow the journal blocks, and cause an assert failure. By
limiting the number of blocks in the transaction, we prevent this
overflow and give other running processes time to do transactions.

The limiting factor I chose is sd_log_thresh2, which is currently set
to 4/5ths of the journal. This same ratio is used in function
gfs2_ail_flush_reqd to determine when a log flush is required. If we
make the maximum value less than this, we can get into an infinite
hang whereby the log stops moving because the number of used blocks
is less than the threshold and the iterative loop needs more, but
since we're under the threshold, the log daemon never starts any IO
on the log.
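
As a rough sketch of the approach (an editorial illustration only, not
part of the patch itself; the names follow the code below but the loop
body is elided):

	/* Strip a huge file in several smaller transactions. */
	do {
		blks = rg_blocks + RES_DINODE + RES_INDIRECT +
		       RES_STATFS + RES_QUOTA +
		       gfs2_struct2blk(sdp, revokes, sizeof(u64));
		/* Never ask for more than logd is willing to flush. */
		if (blks > atomic_read(&sdp->sd_log_thresh2))
			blks = atomic_read(&sdp->sd_log_thresh2);
		error = gfs2_trans_begin(sdp, blks, revokes);
		if (error)
			break;
		/* ... strip blocks until this transaction is nearly full ... */
		gfs2_trans_end(sdp);
	} while (rg_blocks);	/* work left over? start a new transaction */
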
Signed-off-by: Bob Peterson
---
 fs/gfs2/bmap.c | 29 ++++++++++++++++++++++++++---
 1 file changed, 26 insertions(+), 3 deletions(-)

diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 645721f3ff00..cbad0ef6806f 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -720,6 +720,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct gfs2_rgrp_list rlist;
+	struct gfs2_trans *tr;
 	u64 bn, bstart;
 	u32 blen, btotal;
 	__be64 *p;
@@ -728,6 +729,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
 	unsigned int revokes = 0;
 	int x;
 	int error;
+	int jblocks_rqsted;

 	error = gfs2_rindex_update(sdp);
 	if (error)
@@ -791,12 +793,17 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
 	if (gfs2_rs_active(&ip->i_res)) /* needs to be done with the rgrp glock held */
 		gfs2_rs_deltree(&ip->i_res);

-	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE +
-				 RES_INDIRECT + RES_STATFS + RES_QUOTA,
-				 revokes);
+restart:
+	jblocks_rqsted = rg_blocks + RES_DINODE +
+		RES_INDIRECT + RES_STATFS + RES_QUOTA +
+		gfs2_struct2blk(sdp, revokes, sizeof(u64));
+	if (jblocks_rqsted > atomic_read(&sdp->sd_log_thresh2))
+		jblocks_rqsted = atomic_read(&sdp->sd_log_thresh2);
+	error = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
 	if (error)
 		goto out_rg_gunlock;

+	tr = current->journal_info;
 	down_write(&ip->i_rw_mutex);

 	gfs2_trans_add_meta(ip->i_gl, dibh);
@@ -810,6 +817,16 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
 		if (!*p)
 			continue;

+		/* check for max reasonable journal transaction blocks */
+		if (tr->tr_num_buf_new + RES_STATFS +
+		    RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
+			if (rg_blocks >= tr->tr_num_buf_new)
+				rg_blocks -= tr->tr_num_buf_new;
+			else
+				rg_blocks = 0;
+			break;
+		}
+
 		bn = be64_to_cpu(*p);

 		if (bstart + blen == bn)
@@ -827,6 +844,9 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
 		*p = 0;
 		gfs2_add_inode_blocks(&ip->i_inode, -1);
 	}
+	if (p == bottom)
+		rg_blocks = 0;
+
 	if (bstart) {
 		__gfs2_free_blocks(ip, bstart, blen, metadata);
 		btotal += blen;
@@ -844,6 +864,9 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,

 	gfs2_trans_end(sdp);

+	if (rg_blocks)
+		goto restart;
+
 out_rg_gunlock:
 	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
 out_rlist:

From f07b352021483a3a38f081dc284928400a9c1d2c Mon Sep 17 00:00:00 2001
From: Bob Peterson
Date: Thu, 5 Jan 2017 16:01:45 -0500
Subject: [PATCH 3/8] GFS2: Made logd daemon take into account log demand

Before this patch, the logd daemon only tried to flush things when
the log blocks pinned exceeded a certain threshold. But when we're
deleting very large files, it may require a huge number of journal
blocks, and that, in turn, may exceed the threshold. This patch
takes that demand into account.
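
As a rough sketch of the idea (an editorial illustration only, not part
of the change): a reservation advertises its demand to logd for as long
as it may have to wait, and the flush heuristics add that demand in:

	atomic_add(blks, &sdp->sd_log_blks_needed);	/* demand now visible to logd */
	/* ... possibly sleep until enough journal blocks are freed ... */
	atomic_sub(blks, &sdp->sd_log_blks_needed);	/* demand satisfied */

	/* logd: flush if pinned blocks plus outstanding demand cross thresh1 */
	if (atomic_read(&sdp->sd_log_pinned) +
	    atomic_read(&sdp->sd_log_blks_needed) >=
	    atomic_read(&sdp->sd_log_thresh1))
		gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
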
Signed-off-by: Bob Peterson
---
 fs/gfs2/incore.h     | 1 +
 fs/gfs2/log.c        | 9 +++++++--
 fs/gfs2/ops_fstype.c | 1 +
 3 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index a6a3389a07fc..00d8dc20ea3a 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -794,6 +794,7 @@ struct gfs2_sbd {
 	atomic_t sd_log_thresh1;
 	atomic_t sd_log_thresh2;
 	atomic_t sd_log_blks_free;
+	atomic_t sd_log_blks_needed;
 	wait_queue_head_t sd_log_waitq;
 	wait_queue_head_t sd_logd_waitq;

diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index e58ccef09c91..4df349c7f022 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -349,6 +349,7 @@ int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
 	if (gfs2_assert_warn(sdp, blks) ||
 	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
 		return -EINVAL;
+	atomic_add(blks, &sdp->sd_log_blks_needed);
 retry:
 	free_blocks = atomic_read(&sdp->sd_log_blks_free);
 	if (unlikely(free_blocks <= wanted)) {
@@ -370,6 +371,7 @@ retry:
 		wake_up(&sdp->sd_reserving_log_wait);
 		goto retry;
 	}
+	atomic_sub(blks, &sdp->sd_log_blks_needed);
 	trace_gfs2_log_blocks(sdp, -blks);

 	/*
@@ -891,13 +893,16 @@ void gfs2_log_shutdown(struct gfs2_sbd *sdp)

 static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
 {
-	return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1));
+	return (atomic_read(&sdp->sd_log_pinned) +
+		atomic_read(&sdp->sd_log_blks_needed) >=
+		atomic_read(&sdp->sd_log_thresh1));
 }

 static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
 {
 	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
-	return used_blocks >= atomic_read(&sdp->sd_log_thresh2);
+	return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
+		atomic_read(&sdp->sd_log_thresh2);
 }

 /**
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index ff72ac6439c8..86281a918c7a 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -683,6 +683,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
 		goto fail_jindex;
 	}

+	atomic_set(&sdp->sd_log_blks_needed, 0);
 	if (sdp->sd_args.ar_spectator) {
 		sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0);
 		atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);

From b63f5e84826b3e1ae81e051a6a7c5a94b657aecb Mon Sep 17 00:00:00 2001
From: Bob Peterson
Date: Fri, 6 Jan 2017 22:14:28 -0500
Subject: [PATCH 4/8] GFS2: Wake up io waiters whenever a flush is done

Before this patch, if a process called function gfs2_log_reserve to
reserve some journal blocks, but not enough journal blocks were free,
it would call io_schedule. However, the log flush daemon woke up the
waiters only if a gfs2_ail_flush was no longer required. This resulted
in situations where processes would wait forever because the number of
blocks required was so high that it pushed the journal into a perpetual
state of flush being required.

This patch changes the logd daemon so that it wakes up io waiters
every time the log is actually flushed.
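
The shape of the fix, roughly (an editorial illustration only, not part
of the change): remember whether this pass through the daemon loop
actually flushed, and if so wake the reservation waiters unconditionally:

	bool did_flush = false;

	if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
		gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
		did_flush = true;
	}
	/* ... likewise for the ail-flush case ... */
	if (!gfs2_ail_flush_reqd(sdp) || did_flush)
		wake_up(&sdp->sd_log_waitq);
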
Signed-off-by: Bob Peterson
---
 fs/gfs2/log.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 4df349c7f022..5028a9d00c17 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -918,12 +918,15 @@ int gfs2_logd(void *data)
 	struct gfs2_sbd *sdp = data;
 	unsigned long t = 1;
 	DEFINE_WAIT(wait);
+	bool did_flush;

 	while (!kthread_should_stop()) {

+		did_flush = false;
 		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
 			gfs2_ail1_empty(sdp);
 			gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
+			did_flush = true;
 		}

 		if (gfs2_ail_flush_reqd(sdp)) {
@@ -931,9 +934,10 @@ int gfs2_logd(void *data)
 			gfs2_ail1_wait(sdp);
 			gfs2_ail1_empty(sdp);
 			gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
+			did_flush = true;
 		}

-		if (!gfs2_ail_flush_reqd(sdp))
+		if (!gfs2_ail_flush_reqd(sdp) || did_flush)
 			wake_up(&sdp->sd_log_waitq);

 		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

From 9862ca056e654633e521b303f74fb123f7f17e98 Mon Sep 17 00:00:00 2001
From: Bob Peterson
Date: Wed, 25 Jan 2017 12:50:47 -0500
Subject: [PATCH 5/8] GFS2: Switch tr_touched to flag in transaction

This patch eliminates the int variable tr_touched in favor of a new
flag in the transaction. This is a step toward reducing contention on
the gfs2_log_lock spin_lock.

Signed-off-by: Bob Peterson
---
 fs/gfs2/incore.h  | 10 +++++++---
 fs/gfs2/log.c     |  6 +++---
 fs/gfs2/meta_io.c |  6 +++---
 fs/gfs2/trans.c   | 19 ++++++++++---------
 4 files changed, 23 insertions(+), 18 deletions(-)

diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 00d8dc20ea3a..c45084ac642d 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -470,15 +470,19 @@ struct gfs2_quota_data {
 	struct rcu_head qd_rcu;
 };

+enum {
+	TR_TOUCHED = 1,
+	TR_ATTACHED = 2,
+	TR_ALLOCED = 3,
+};
+
 struct gfs2_trans {
 	unsigned long tr_ip;

 	unsigned int tr_blocks;
 	unsigned int tr_revokes;
 	unsigned int tr_reserved;
-	unsigned int tr_touched:1;
-	unsigned int tr_attached:1;
-	unsigned int tr_alloced:1;
+	unsigned long tr_flags;

 	unsigned int tr_num_buf_new;
 	unsigned int tr_num_databuf_new;
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 5028a9d00c17..4fb76c04e65b 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -799,7 +799,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,

 static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
 {
-	WARN_ON_ONCE(old->tr_attached != 1);
+	WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));

 	old->tr_num_buf_new += new->tr_num_buf_new;
 	old->tr_num_databuf_new += new->tr_num_databuf_new;
@@ -823,9 +823,9 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 	if (sdp->sd_log_tr) {
 		gfs2_merge_trans(sdp->sd_log_tr, tr);
 	} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
-		gfs2_assert_withdraw(sdp, tr->tr_alloced);
+		gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags));
 		sdp->sd_log_tr = tr;
-		tr->tr_attached = 1;
+		set_bit(TR_ATTACHED, &tr->tr_flags);
 	}

 	sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 373639a59782..a88a347cffe3 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -293,7 +293,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
 	wait_on_buffer(bh);
 	if (unlikely(!buffer_uptodate(bh))) {
 		struct gfs2_trans *tr = current->journal_info;
-		if (tr && tr->tr_touched)
+		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
 			gfs2_io_error_bh(sdp, bh);
 		brelse(bh);
 		*bhp = NULL;
@@ -320,7 +320,7 @@ int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)

 	if (!buffer_uptodate(bh)) {
 		struct gfs2_trans *tr = current->journal_info;
-		if (tr && tr->tr_touched)
+		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
 			gfs2_io_error_bh(sdp, bh);
 		return -EIO;
 	}
@@ -346,7 +346,7 @@ void gfs2_remove_from_journal(struct buffer_head *bh, int meta)
 			tr->tr_num_buf_rm++;
 		else
 			tr->tr_num_databuf_rm++;
-		tr->tr_touched = 1;
+		set_bit(TR_TOUCHED, &tr->tr_flags);
 		was_pinned = 1;
 		brelse(bh);
 	}
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 0c1bde395062..5d7f5b08d600 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -48,7 +48,7 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
 	tr->tr_blocks = blocks;
 	tr->tr_revokes = revokes;
 	tr->tr_reserved = 1;
-	tr->tr_alloced = 1;
+	set_bit(TR_ALLOCED, &tr->tr_flags);
 	if (blocks)
 		tr->tr_reserved += 6 + blocks;
 	if (revokes)
@@ -78,7 +78,8 @@ static void gfs2_print_trans(const struct gfs2_trans *tr)
 {
 	pr_warn("Transaction created at: %pSR\n", (void *)tr->tr_ip);
 	pr_warn("blocks=%u revokes=%u reserved=%u touched=%u\n",
-		tr->tr_blocks, tr->tr_revokes, tr->tr_reserved, tr->tr_touched);
+		tr->tr_blocks, tr->tr_revokes, tr->tr_reserved,
+		test_bit(TR_TOUCHED, &tr->tr_flags));
 	pr_warn("Buf %u/%u Databuf %u/%u Revoke %u/%u\n",
 		tr->tr_num_buf_new, tr->tr_num_buf_rm,
 		tr->tr_num_databuf_new, tr->tr_num_databuf_rm,
@@ -89,12 +90,12 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
 {
 	struct gfs2_trans *tr = current->journal_info;
 	s64 nbuf;
-	int alloced = tr->tr_alloced;
+	int alloced = test_bit(TR_ALLOCED, &tr->tr_flags);

 	BUG_ON(!tr);
 	current->journal_info = NULL;

-	if (!tr->tr_touched) {
+	if (!test_bit(TR_TOUCHED, &tr->tr_flags)) {
 		gfs2_log_release(sdp, tr->tr_reserved);
 		if (alloced) {
 			kfree(tr);
@@ -112,8 +113,8 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
 		gfs2_print_trans(tr);

 	gfs2_log_commit(sdp, tr);
-	if (alloced && !tr->tr_attached)
-		kfree(tr);
+	if (alloced && !test_bit(TR_ATTACHED, &tr->tr_flags))
+		kfree(tr);
 	up_read(&sdp->sd_log_flush_lock);

 	if (sdp->sd_vfs->s_flags & MS_SYNCHRONOUS)
@@ -182,7 +183,7 @@ void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh)
 		gfs2_log_lock(sdp);
 	}
 	gfs2_assert(sdp, bd->bd_gl == gl);
-	tr->tr_touched = 1;
+	set_bit(TR_TOUCHED, &tr->tr_flags);
 	if (list_empty(&bd->bd_list)) {
 		set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
 		set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
@@ -201,7 +202,7 @@ static void meta_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
 	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

 	tr = current->journal_info;
-	tr->tr_touched = 1;
+	set_bit(TR_TOUCHED, &tr->tr_flags);
 	if (!list_empty(&bd->bd_list))
 		return;
 	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
@@ -256,7 +257,7 @@ void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)

 	BUG_ON(!list_empty(&bd->bd_list));
 	gfs2_add_revoke(sdp, bd);
-	tr->tr_touched = 1;
+	set_bit(TR_TOUCHED, &tr->tr_flags);
 	tr->tr_num_revoke++;
 }

From 192738b711d3e41a7326347db5d6923d2136b6d0 Mon Sep 17 00:00:00 2001
From: Bob Peterson
Date: Wed, 25 Jan 2017 12:57:42 -0500
Subject: [PATCH 6/8] GFS2: Inline function meta_lo_add

This patch simply combines function meta_lo_add with its only caller,
gfs2_trans_add_meta. This makes the code easier to read and will make
it easier to reduce contention on gfs2_log_lock.
Signed-off-by: Bob Peterson
---
 fs/gfs2/trans.c | 55 ++++++++++++++++++++++---------------------
 1 file changed, 25 insertions(+), 30 deletions(-)

diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 5d7f5b08d600..483c5a7e16d4 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -195,40 +195,14 @@ void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh)
 	unlock_buffer(bh);
 }

-static void meta_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
-{
-	struct gfs2_meta_header *mh;
-	struct gfs2_trans *tr;
-	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
-
-	tr = current->journal_info;
-	set_bit(TR_TOUCHED, &tr->tr_flags);
-	if (!list_empty(&bd->bd_list))
-		return;
-	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
-	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
-	mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
-	if (unlikely(mh->mh_magic != cpu_to_be32(GFS2_MAGIC))) {
-		pr_err("Attempting to add uninitialised block to journal (inplace block=%lld)\n",
-		       (unsigned long long)bd->bd_bh->b_blocknr);
-		BUG();
-	}
-	if (unlikely(state == SFS_FROZEN)) {
-		printk(KERN_INFO "GFS2:adding buf while frozen\n");
-		gfs2_assert_withdraw(sdp, 0);
-	}
-	gfs2_pin(sdp, bd->bd_bh);
-	mh->__pad0 = cpu_to_be64(0);
-	mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
-	list_add(&bd->bd_list, &tr->tr_buf);
-	tr->tr_num_buf_new++;
-}
-
 void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct gfs2_bufdata *bd;
+	struct gfs2_meta_header *mh;
+	struct gfs2_trans *tr;
+	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

 	lock_buffer(bh);
 	gfs2_log_lock(sdp);
@@ -246,7 +220,28 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
 		gfs2_log_lock(sdp);
 	}
 	gfs2_assert(sdp, bd->bd_gl == gl);
-	meta_lo_add(sdp, bd);
+	tr = current->journal_info;
+	set_bit(TR_TOUCHED, &tr->tr_flags);
+	if (!list_empty(&bd->bd_list))
+		goto out_unlock;
+	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
+	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
+	mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
+	if (unlikely(mh->mh_magic != cpu_to_be32(GFS2_MAGIC))) {
+		pr_err("Attempting to add uninitialised block to journal (inplace block=%lld)\n",
+		       (unsigned long long)bd->bd_bh->b_blocknr);
+		BUG();
+	}
+	if (unlikely(state == SFS_FROZEN)) {
+		printk(KERN_INFO "GFS2:adding buf while frozen\n");
+		gfs2_assert_withdraw(sdp, 0);
+	}
+	gfs2_pin(sdp, bd->bd_bh);
+	mh->__pad0 = cpu_to_be64(0);
+	mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
+	list_add(&bd->bd_list, &tr->tr_buf);
+	tr->tr_num_buf_new++;
+out_unlock:
 	gfs2_log_unlock(sdp);
 	unlock_buffer(bh);
 }

From aacee72061a352d52ef9d3854f8db3b86b93ea16 Mon Sep 17 00:00:00 2001
From: Bob Peterson
Date: Mon, 30 Jan 2017 11:51:21 -0500
Subject: [PATCH 7/8] GFS2: Reduce contention on gfs2_log_lock

This patch modifies functions gfs2_trans_add_meta and gfs2_trans_add_data
so that they check whether the buffer_head is already in a transaction,
and if so, avoid taking the gfs2_log_lock.
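
The fast path looks roughly like this (an editorial illustration only,
not part of the change): a buffer that is still pinned is already in a
transaction, so the spin_lock can be skipped entirely:

	lock_buffer(bh);
	if (buffer_pinned(bh)) {		/* already in a transaction */
		set_bit(TR_TOUCHED, &tr->tr_flags);
		goto out;			/* no need for gfs2_log_lock */
	}
	gfs2_log_lock(sdp);
	/* ... slow path: attach the buffer to the transaction ... */
	gfs2_log_unlock(sdp);
out:
	unlock_buffer(bh);
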
Signed-off-by: Bob Peterson
---
 fs/gfs2/trans.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 483c5a7e16d4..affef3c066e0 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -170,6 +170,10 @@ void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh)
 	}

 	lock_buffer(bh);
+	if (buffer_pinned(bh)) {
+		set_bit(TR_TOUCHED, &tr->tr_flags);
+		goto out;
+	}
 	gfs2_log_lock(sdp);
 	bd = bh->b_private;
 	if (bd == NULL) {
@@ -192,6 +196,7 @@ void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh)
 		list_add_tail(&bd->bd_list, &tr->tr_databuf);
 	}
 	gfs2_log_unlock(sdp);
+out:
 	unlock_buffer(bh);
 }

@@ -201,10 +206,14 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct gfs2_bufdata *bd;
 	struct gfs2_meta_header *mh;
-	struct gfs2_trans *tr;
+	struct gfs2_trans *tr = current->journal_info;
 	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

 	lock_buffer(bh);
+	if (buffer_pinned(bh)) {
+		set_bit(TR_TOUCHED, &tr->tr_flags);
+		goto out;
+	}
 	gfs2_log_lock(sdp);
 	bd = bh->b_private;
 	if (bd == NULL) {
@@ -220,7 +229,6 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
 		gfs2_log_lock(sdp);
 	}
 	gfs2_assert(sdp, bd->bd_gl == gl);
-	tr = current->journal_info;
 	set_bit(TR_TOUCHED, &tr->tr_flags);
 	if (!list_empty(&bd->bd_list))
 		goto out_unlock;
@@ -243,6 +251,7 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
 	tr->tr_num_buf_new++;
 out_unlock:
 	gfs2_log_unlock(sdp);
+out:
 	unlock_buffer(bh);
 }

From c548a1c175608e268f6495f3f82461303584d1c9 Mon Sep 17 00:00:00 2001
From: Andrew Price
Date: Fri, 3 Feb 2017 08:23:47 -0500
Subject: [PATCH 8/8] gfs2: Make gfs2_write_full_page static

It only gets called from aops.c and doesn't appear in any headers.

Signed-off-by: Andrew Price
Signed-off-by: Bob Peterson
---
 fs/gfs2/aops.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 5a6f52ea2722..755b81e27e4f 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -143,8 +143,8 @@ static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
 /* This is the same as calling block_write_full_page, but it also
  * writes pages outside of i_size
  */
-int gfs2_write_full_page(struct page *page, get_block_t *get_block,
-			 struct writeback_control *wbc)
+static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
+				struct writeback_control *wbc)
 {
 	struct inode * const inode = page->mapping->host;
 	loff_t i_size = i_size_read(inode);