2006-01-16 16:50:04 +00:00
|
|
|
/*
|
|
|
|
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
2008-01-31 16:31:39 +00:00
|
|
|
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
|
2006-01-16 16:50:04 +00:00
|
|
|
*
|
|
|
|
* This copyrighted material is made available to anyone wishing to use,
|
|
|
|
* modify, copy, or redistribute it subject to the terms and conditions
|
2006-09-01 15:05:15 +00:00
|
|
|
* of the GNU General Public License version 2.
|
2006-01-16 16:50:04 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/spinlock.h>
|
|
|
|
#include <linux/completion.h>
|
|
|
|
#include <linux/buffer_head.h>
|
2006-02-27 22:23:27 +00:00
|
|
|
#include <linux/gfs2_ondisk.h>
|
2008-05-21 16:03:22 +00:00
|
|
|
#include <linux/bio.h>
|
2009-10-02 10:54:39 +00:00
|
|
|
#include <linux/posix_acl.h>
|
2006-01-16 16:50:04 +00:00
|
|
|
|
|
|
|
#include "gfs2.h"
|
2006-02-27 22:23:27 +00:00
|
|
|
#include "incore.h"
|
2006-01-16 16:50:04 +00:00
|
|
|
#include "bmap.h"
|
|
|
|
#include "glock.h"
|
|
|
|
#include "glops.h"
|
|
|
|
#include "inode.h"
|
|
|
|
#include "log.h"
|
|
|
|
#include "meta_io.h"
|
|
|
|
#include "recovery.h"
|
|
|
|
#include "rgrp.h"
|
2006-02-27 22:23:27 +00:00
|
|
|
#include "util.h"
|
2006-10-03 15:10:41 +00:00
|
|
|
#include "trans.h"
|
2011-06-15 09:29:37 +00:00
|
|
|
#include "dir.h"
|
2006-01-16 16:50:04 +00:00
|
|
|
|
2011-08-02 12:09:36 +00:00
|
|
|
/**
 * gfs2_ail_error - report an AIL inconsistency and withdraw the filesystem
 * @gl: the glock whose AIL list held the offending buffer
 * @bh: the buffer found in an unexpected state (dirty/pinned/locked)
 *
 * Logs the buffer and glock details, then withdraws the filesystem via
 * gfs2_lm_withdraw() since a bad AIL entry implies journal inconsistency.
 */
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_sbd, "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm_withdraw(gl->gl_sbd, "AIL error\n");
}
|
|
|
|
|
2006-10-03 15:10:41 +00:00
|
|
|
/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: maximum number of revokes to queue in this pass
 *
 * None of the buffers should be dirty, locked, or pinned.
 * Caller must have already reserved journal space for @nr_revokes revokes.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	/* Buffer states under which an AIL entry is not yet safe to revoke */
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	/* Log lock, then AIL lock: same ordering as the log code */
	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				/* fsync may legitimately see in-flight buffers */
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	/* Outside of fsync, the whole AIL list must have been drained */
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * gfs2_ail_empty_gl - revoke every remaining AIL buffer for a glock
 * @gl: the glock being emptied
 *
 * Builds a minimal on-stack transaction sized to hold one revoke per AIL
 * entry, flushes the glock's AIL list into it, then commits and flushes
 * the log. A no-op if the glock has no AIL buffers.
 */
static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin() */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = (unsigned long)__builtin_return_address(0);
	sb_start_intwrite(sdp->sd_vfs);
	gfs2_log_reserve(sdp, tr.tr_reserved);
	/* There must be no transaction already in progress on this task */
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, 0, tr.tr_revokes);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}
|
2006-10-03 15:10:41 +00:00
|
|
|
|
2011-09-07 09:33:25 +00:00
|
|
|
/**
 * gfs2_ail_flush - flush a glock's AIL list via a normal transaction
 * @gl: the glock
 * @fsync: set when called from fsync (busy buffers are skipped, not errors)
 *
 * Sizes the revoke reservation from the on-disk block size: the first log
 * descriptor block holds fewer revokes than the continuation blocks, so
 * max_revokes is grown block-by-block until it covers the AIL count.
 */
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	/* Revokes that fit in the first (descriptor-headed) log block */
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

	/* Add continuation blocks (meta-header overhead only) as needed */
	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}
|
2006-07-26 15:27:10 +00:00
|
|
|
|
|
|
|
/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must
 * not return to caller to demote/unlock the glock until I/O is complete.
 */

static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd;
	int error;

	/* Nothing to do unless the glock is marked dirty */
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	/* Write the journal first, then the rgrp's own pages */
	gfs2_log_flush(sdp, gl);
	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	mapping_set_error(mapping, error);
	gfs2_ail_empty_gl(gl);

	/* Drop any clone bitmaps now that the real bitmaps are on disk */
	spin_lock(&gl->gl_spin);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_spin);
}
|
|
|
|
|
|
|
|
/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_* flags; DIO_METADATA is expected to be set
 *
 * We never used LM_ST_DEFERRED with resource groups, so that we
 * should always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = &sdp->sd_aspace;

	WARN_ON_ONCE(!(flags & DIO_METADATA));
	/* There must be no outstanding AIL buffers at this point */
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

	if (gl->gl_object) {
		struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
		/* Force a re-read of the rgrp header next time it is needed */
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	}
}
|
|
|
|
|
2007-01-22 17:15:34 +00:00
|
|
|
/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 * Writes back the metadata mapping and, for regular files, the data
 * mapping too, then empties the glock's AIL list. Only regular files
 * have page-cache data to sync; for other inode types @ip is dropped
 * and just the metadata is handled.
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	/* Only regular files carry syncable data pages */
	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;
	if (ip) {
		/* Revoke writable mmaps so future faults re-acquire the lock */
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	/* Journal first, then metadata, then (for regular files) data */
	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	if (ip) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_clear_bit();
	clear_bit(GLF_DIRTY, &gl->gl_flags);
}
|
|
|
|
|
2006-01-16 16:50:04 +00:00
|
|
|
/**
 * inode_go_inval - prepare a inode glock to be released
 * @gl: the glock
 * @flags: DIO_METADATA if cached metadata must be dropped too
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;

	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			/* Force a dinode re-read on the next inode_go_lock() */
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	/* The rindex inode additionally invalidates the incore rgrp index */
	if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_sbd, NULL);
		gl->gl_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
|
|
|
|
* @gl: the glock
|
|
|
|
*
|
|
|
|
* Returns: 1 if it's ok
|
|
|
|
*/
|
|
|
|
|
2008-11-20 13:39:47 +00:00
|
|
|
static int inode_go_demote_ok(const struct gfs2_glock *gl)
|
2006-01-16 16:50:04 +00:00
|
|
|
{
|
|
|
|
struct gfs2_sbd *sdp = gl->gl_sbd;
|
2011-01-19 09:30:01 +00:00
|
|
|
struct gfs2_holder *gh;
|
|
|
|
|
2008-11-20 13:39:47 +00:00
|
|
|
if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
|
|
|
|
return 0;
|
2011-01-19 09:30:01 +00:00
|
|
|
|
|
|
|
if (!list_empty(&gl->gl_holders)) {
|
|
|
|
gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
|
|
|
|
if (gh->gh_list.next != &gl->gl_holders)
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-11-20 13:39:47 +00:00
|
|
|
return 1;
|
2006-01-16 16:50:04 +00:00
|
|
|
}
|
|
|
|
|
2011-05-09 12:49:59 +00:00
|
|
|
/**
|
|
|
|
* gfs2_set_nlink - Set the inode's link count based on on-disk info
|
|
|
|
* @inode: The inode in question
|
|
|
|
* @nlink: The link count
|
|
|
|
*
|
|
|
|
* If the link count has hit zero, it must never be raised, whatever the
|
|
|
|
* on-disk inode might say. When new struct inodes are created the link
|
|
|
|
* count is set to 1, so that we can safely use this test even when reading
|
|
|
|
* in on disk information for the first time.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static void gfs2_set_nlink(struct inode *inode, u32 nlink)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* We will need to review setting the nlink count here in the
|
|
|
|
* light of the forthcoming ro bind mount work. This is a reminder
|
|
|
|
* to do that.
|
|
|
|
*/
|
|
|
|
if ((inode->i_nlink != nlink) && (inode->i_nlink != 0)) {
|
|
|
|
if (nlink == 0)
|
|
|
|
clear_nlink(inode);
|
|
|
|
else
|
2011-10-28 12:13:29 +00:00
|
|
|
set_nlink(inode, nlink);
|
2011-05-09 12:49:59 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
|
|
|
|
{
|
|
|
|
const struct gfs2_dinode *str = buf;
|
|
|
|
struct timespec atime;
|
|
|
|
u16 height, depth;
|
|
|
|
|
|
|
|
if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
|
|
|
|
goto corrupt;
|
|
|
|
ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
|
|
|
|
ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
|
|
|
|
ip->i_inode.i_rdev = 0;
|
|
|
|
switch (ip->i_inode.i_mode & S_IFMT) {
|
|
|
|
case S_IFBLK:
|
|
|
|
case S_IFCHR:
|
|
|
|
ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
|
|
|
|
be32_to_cpu(str->di_minor));
|
|
|
|
break;
|
|
|
|
};
|
|
|
|
|
2013-02-01 06:08:10 +00:00
|
|
|
i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
|
|
|
|
i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
|
2011-05-09 12:49:59 +00:00
|
|
|
gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
|
|
|
|
i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
|
|
|
|
gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
|
|
|
|
atime.tv_sec = be64_to_cpu(str->di_atime);
|
|
|
|
atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
|
|
|
|
if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
|
|
|
|
ip->i_inode.i_atime = atime;
|
|
|
|
ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
|
|
|
|
ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
|
|
|
|
ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
|
|
|
|
ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);
|
|
|
|
|
|
|
|
ip->i_goal = be64_to_cpu(str->di_goal_meta);
|
|
|
|
ip->i_generation = be64_to_cpu(str->di_generation);
|
|
|
|
|
|
|
|
ip->i_diskflags = be32_to_cpu(str->di_flags);
|
2011-06-16 13:06:55 +00:00
|
|
|
ip->i_eattr = be64_to_cpu(str->di_eattr);
|
|
|
|
/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
|
2011-05-09 12:49:59 +00:00
|
|
|
gfs2_set_inode_flags(&ip->i_inode);
|
|
|
|
height = be16_to_cpu(str->di_height);
|
|
|
|
if (unlikely(height > GFS2_MAX_META_HEIGHT))
|
|
|
|
goto corrupt;
|
|
|
|
ip->i_height = (u8)height;
|
|
|
|
|
|
|
|
depth = be16_to_cpu(str->di_depth);
|
|
|
|
if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
|
|
|
|
goto corrupt;
|
|
|
|
ip->i_depth = (u8)depth;
|
|
|
|
ip->i_entries = be32_to_cpu(str->di_entries);
|
|
|
|
|
|
|
|
if (S_ISREG(ip->i_inode.i_mode))
|
|
|
|
gfs2_set_aops(&ip->i_inode);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
corrupt:
|
|
|
|
gfs2_consist_inode(ip);
|
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	/* NOTE(review): GIF_INVALID is cleared even when gfs2_dinode_in()
	 * fails; the error is still propagated to the caller — confirm
	 * this ordering is intentional. */
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}
|
|
|
|
|
2006-01-16 16:50:04 +00:00
|
|
|
/**
|
|
|
|
* inode_go_lock - operation done after an inode lock is locked by a process
|
|
|
|
* @gl: the glock
|
|
|
|
* @flags:
|
|
|
|
*
|
|
|
|
* Returns: errno
|
|
|
|
*/
|
|
|
|
|
|
|
|
static int inode_go_lock(struct gfs2_holder *gh)
|
|
|
|
{
|
|
|
|
struct gfs2_glock *gl = gh->gh_gl;
|
2008-11-18 13:38:48 +00:00
|
|
|
struct gfs2_sbd *sdp = gl->gl_sbd;
|
2006-02-27 22:23:27 +00:00
|
|
|
struct gfs2_inode *ip = gl->gl_object;
|
2006-01-16 16:50:04 +00:00
|
|
|
int error = 0;
|
|
|
|
|
2008-04-29 17:35:48 +00:00
|
|
|
if (!ip || (gh->gh_flags & GL_SKIP))
|
2006-01-16 16:50:04 +00:00
|
|
|
return 0;
|
|
|
|
|
2006-11-01 21:05:38 +00:00
|
|
|
if (test_bit(GIF_INVALID, &ip->i_flags)) {
|
2006-01-16 16:50:04 +00:00
|
|
|
error = gfs2_inode_refresh(ip);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2013-12-19 11:04:14 +00:00
|
|
|
if (gh->gh_state != LM_ST_DEFERRED)
|
|
|
|
inode_dio_wait(&ip->i_inode);
|
|
|
|
|
2008-11-04 10:05:22 +00:00
|
|
|
if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
|
2006-01-16 16:50:04 +00:00
|
|
|
(gl->gl_state == LM_ST_EXCLUSIVE) &&
|
2008-11-18 13:38:48 +00:00
|
|
|
(gh->gh_state == LM_ST_EXCLUSIVE)) {
|
|
|
|
spin_lock(&sdp->sd_trunc_lock);
|
|
|
|
if (list_empty(&ip->i_trunc_list))
|
|
|
|
list_add(&sdp->sd_trunc_list, &ip->i_trunc_list);
|
|
|
|
spin_unlock(&sdp->sd_trunc_lock);
|
|
|
|
wake_up(&sdp->sd_quota_wait);
|
|
|
|
return 1;
|
|
|
|
}
|
2006-01-16 16:50:04 +00:00
|
|
|
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2008-05-21 16:03:22 +00:00
|
|
|
/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock whose attached inode (if any) is dumped
 *
 */

static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_inode *ip = gl->gl_object;
	if (ip == NULL)
		return;
	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(&ip->i_inode));
}
|
|
|
|
|
2006-01-16 16:50:04 +00:00
|
|
|
/**
 * trans_go_sync - promote/demote the transaction glock
 * @gl: the glock
 *
 * When the transaction glock is being given up and the journal is still
 * live, sync all metadata and shut the log down cleanly first.
 */

static void trans_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}
|
|
|
|
|
|
|
|
/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 * @gh: the holder
 *
 * On re-acquisition with a live journal: drop cached journal metadata,
 * locate the log head, verify it was cleanly unmounted, and re-initialize
 * the log pointers.
 *
 * Returns: always 0
 */

static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		/* Invalidate cached journal metadata before re-reading it */
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		/* The journal must have been shut down cleanly */
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize some head of the log stuff */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}
|
|
|
|
|
2008-11-20 13:39:47 +00:00
|
|
|
/**
 * trans_go_demote_ok - the transaction glock may never be demoted
 * @gl: the glock
 *
 * Always returns 0
 */

static int trans_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}
|
|
|
|
|
2009-07-23 23:52:34 +00:00
|
|
|
/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this demote request came from another node
 *
 * gl_spin lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	/* Only remote demote requests on a writable mount matter here */
	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		/* Take a ref for the queued work; drop it if queueing failed
		 * because the work was already pending */
		gl->gl_lockref.count++;
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gl->gl_lockref.count--;
	}
}
|
|
|
|
|
2006-08-30 13:30:00 +00:00
|
|
|
/* Plain metadata glocks: no per-type callbacks needed */
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

/* Inode glocks: full sync/invalidate cycle plus an address space (ASPACE) */
const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE,
};

/* Resource group glocks: carry lock value blocks (LVB) for rgrp state */
const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_unlock = gfs2_rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

/* The (single) transaction glock guarding the journal */
const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_sync = trans_go_sync,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_demote_ok = trans_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

/* iopen glocks: remote callbacks trigger deferred inode deletion */
const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

/* Quota glocks carry LVBs holding quota usage data */
const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

/* Lookup table from LM_TYPE_* to its operations; used e.g. by sysfs demote_rq */
const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};
|
|
|
|
|