Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw

* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw:
  GFS2: Fix recovery stuck bug (try #2)
  GFS2: Fix typo in stuffed file data copy handling
  Revert "GFS2: recovery stuck on transaction lock"
  GFS2: Make "try" lock not try quite so hard
  GFS2: remove dependency on __GFP_NOFAIL
  GFS2: Simplify gfs2_write_alloc_required
  GFS2: Wait for journal id on mount if not specified on mount command line
  GFS2: Use nobh_writepage
commit 3a09b1be53
@@ -136,10 +136,7 @@ static int gfs2_writeback_writepage(struct page *page,
         if (ret <= 0)
                 return ret;
 
-        ret = mpage_writepage(page, gfs2_get_block_noalloc, wbc);
-        if (ret == -EAGAIN)
-                ret = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
-        return ret;
+        return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
 }
 
 /**
@@ -637,9 +634,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
                 }
         }
 
-        error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
-        if (error)
-                goto out_unlock;
+        alloc_required = gfs2_write_alloc_required(ip, pos, len);
 
         if (alloc_required || gfs2_is_jdata(ip))
                 gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
@@ -1040,7 +1040,7 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
                 goto out;
 
         if (gfs2_is_stuffed(ip)) {
-                u64 dsize = size + sizeof(struct gfs2_inode);
+                u64 dsize = size + sizeof(struct gfs2_dinode);
                 ip->i_disksize = size;
                 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
                 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
@@ -1244,13 +1244,12 @@ int gfs2_file_dealloc(struct gfs2_inode *ip)
  * @ip: the file being written to
  * @offset: the offset to write to
  * @len: the number of bytes being written
- * @alloc_required: set to 1 if an alloc is required, 0 otherwise
  *
- * Returns: errno
+ * Returns: 1 if an alloc is required, 0 otherwise
  */
 
 int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
-                              unsigned int len, int *alloc_required)
+                              unsigned int len)
 {
         struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
         struct buffer_head bh;
@@ -1258,26 +1257,23 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
         u64 lblock, lblock_stop, size;
         u64 end_of_file;
 
-        *alloc_required = 0;
-
         if (!len)
                 return 0;
 
         if (gfs2_is_stuffed(ip)) {
                 if (offset + len >
                     sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
-                        *alloc_required = 1;
+                        return 1;
                 return 0;
         }
 
-        *alloc_required = 1;
         shift = sdp->sd_sb.sb_bsize_shift;
         BUG_ON(gfs2_is_dir(ip));
         end_of_file = (ip->i_disksize + sdp->sd_sb.sb_bsize - 1) >> shift;
         lblock = offset >> shift;
         lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
         if (lblock_stop > end_of_file)
-                return 0;
+                return 1;
 
         size = (lblock_stop - lblock) << shift;
         do {
@@ -1285,12 +1281,11 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
                 bh.b_size = size;
                 gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
                 if (!buffer_mapped(&bh))
-                        return 0;
+                        return 1;
                 size -= bh.b_size;
                 lblock += (bh.b_size >> ip->i_inode.i_blkbits);
         } while(size > 0);
 
-        *alloc_required = 0;
         return 0;
 }
 
@@ -52,6 +52,6 @@ int gfs2_truncatei(struct gfs2_inode *ip, u64 size);
 int gfs2_truncatei_resume(struct gfs2_inode *ip);
 int gfs2_file_dealloc(struct gfs2_inode *ip);
 int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
-                              unsigned int len, int *alloc_required);
+                              unsigned int len);
 
 #endif /* __BMAP_DOT_H__ */
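The bmap.c and bmap.h hunks above change the calling convention of gfs2_write_alloc_required(): instead of returning an errno and reporting the answer through an *alloc_required out-parameter, it now returns 1 or 0 directly. A minimal caller-side sketch of the before/after; only the gfs2_write_alloc_required() prototypes come from this diff, and reserve_blocks() is a hypothetical helper used for illustration:

    /* old convention: errno return plus out-parameter */
    int alloc_required, error;

    error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
    if (error)
            return error;
    if (alloc_required)
            reserve_blocks(ip, len);        /* hypothetical helper */

    /* new convention: the call cannot fail, so callers just test the result */
    if (gfs2_write_alloc_required(ip, pos, len))
            reserve_blocks(ip, len);        /* hypothetical helper */

The remaining callers in file.c, quota.c and super.c below are converted to the second form.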
@@ -955,7 +955,12 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
         /* Change the pointers.
            Don't bother distinguishing stuffed from non-stuffed.
            This code is complicated enough already. */
-        lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS | __GFP_NOFAIL);
+        lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS);
+        if (!lp) {
+                error = -ENOMEM;
+                goto fail_brelse;
+        }
+
         /* Change the pointers */
         for (x = 0; x < half_len; x++)
                 lp[x] = cpu_to_be64(bn);
@@ -1063,7 +1068,9 @@ static int dir_double_exhash(struct gfs2_inode *dip)
 
         /* Allocate both the "from" and "to" buffers in one big chunk */
 
-        buf = kcalloc(3, sdp->sd_hash_bsize, GFP_NOFS | __GFP_NOFAIL);
+        buf = kcalloc(3, sdp->sd_hash_bsize, GFP_NOFS);
+        if (!buf)
+                return -ENOMEM;
 
         for (block = dip->i_disksize >> sdp->sd_hash_bsize_shift; block--;) {
                 error = gfs2_dir_read_data(dip, (char *)buf,
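Both dir.c hunks drop __GFP_NOFAIL, so the allocation result now has to be checked at the call site instead of letting the allocator retry indefinitely. A compressed sketch of the pattern the diff introduces (the error label is the one visible in the first hunk):

    lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS);
    if (!lp) {
            error = -ENOMEM;
            goto fail_brelse;       /* unwind and return the error to the caller */
    }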
@@ -351,7 +351,6 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
         unsigned long last_index;
         u64 pos = page->index << PAGE_CACHE_SHIFT;
         unsigned int data_blocks, ind_blocks, rblocks;
-        int alloc_required = 0;
         struct gfs2_holder gh;
         struct gfs2_alloc *al;
         int ret;
@@ -364,8 +363,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
         set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
         set_bit(GIF_SW_PAGED, &ip->i_flags);
 
-        ret = gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE, &alloc_required);
-        if (ret || !alloc_required)
+        if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE))
                 goto out_unlock;
         ret = -ENOMEM;
         al = gfs2_alloc_get(ip);
fs/gfs2/glock.c
@@ -327,6 +327,30 @@ static void gfs2_holder_wake(struct gfs2_holder *gh)
         wake_up_bit(&gh->gh_iflags, HIF_WAIT);
 }
 
+/**
+ * do_error - Something unexpected has happened during a lock request
+ *
+ */
+
+static inline void do_error(struct gfs2_glock *gl, const int ret)
+{
+        struct gfs2_holder *gh, *tmp;
+
+        list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
+                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
+                        continue;
+                if (ret & LM_OUT_ERROR)
+                        gh->gh_error = -EIO;
+                else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
+                        gh->gh_error = GLR_TRYFAILED;
+                else
+                        continue;
+                list_del_init(&gh->gh_list);
+                trace_gfs2_glock_queue(gh, 0);
+                gfs2_holder_wake(gh);
+        }
+}
+
 /**
  * do_promote - promote as many requests as possible on the current queue
  * @gl: The glock
@@ -375,35 +399,12 @@ restart:
                 }
                 if (gh->gh_list.prev == &gl->gl_holders)
                         return 1;
+                do_error(gl, 0);
                 break;
         }
         return 0;
 }
 
-/**
- * do_error - Something unexpected has happened during a lock request
- *
- */
-
-static inline void do_error(struct gfs2_glock *gl, const int ret)
-{
-        struct gfs2_holder *gh, *tmp;
-
-        list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
-                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
-                        continue;
-                if (ret & LM_OUT_ERROR)
-                        gh->gh_error = -EIO;
-                else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
-                        gh->gh_error = GLR_TRYFAILED;
-                else
-                        continue;
-                list_del_init(&gh->gh_list);
-                trace_gfs2_glock_queue(gh, 0);
-                gfs2_holder_wake(gh);
-        }
-}
-
 /**
  * find_first_waiter - find the first gh that's waiting for the glock
  * @gl: the glock
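Moving do_error() above do_promote() and calling do_error(gl, 0) when the head of the holder queue cannot be granted is what makes "try" locks fail fast: every queued LM_FLAG_TRY / LM_FLAG_TRY_1CB holder is completed with GLR_TRYFAILED instead of being left to wait behind a blocked request. A caller-side sketch, assuming the usual gfs2_holder API of this tree; the surrounding logic is illustrative and not part of this diff:

    struct gfs2_holder gh;
    int error;

    gfs2_holder_init(gl, LM_ST_SHARED, LM_FLAG_TRY, &gh);
    error = gfs2_glock_nq(&gh);
    if (error == GLR_TRYFAILED) {
            gfs2_holder_uninit(&gh);
            return 0;               /* lock is contended; caller backs off */
    }
    if (error) {
            gfs2_holder_uninit(&gh);
            return error;
    }
    /* ... lock held, do the work ... */
    gfs2_glock_dq_uninit(&gh);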
@@ -706,18 +707,8 @@ static void glock_work_func(struct work_struct *work)
 {
         unsigned long delay = 0;
         struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
-        struct gfs2_holder *gh;
         int drop_ref = 0;
 
-        if (unlikely(test_bit(GLF_FROZEN, &gl->gl_flags))) {
-                spin_lock(&gl->gl_spin);
-                gh = find_first_waiter(gl);
-                if (gh && (gh->gh_flags & LM_FLAG_NOEXP) &&
-                    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
-                        set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-                spin_unlock(&gl->gl_spin);
-        }
-
         if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
                 finish_xmote(gl, gl->gl_reply);
                 drop_ref = 1;
@@ -1072,6 +1063,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
 
         spin_lock(&gl->gl_spin);
         add_to_queue(gh);
+        if ((LM_FLAG_NOEXP & gh->gh_flags) &&
+            test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
+                set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
         run_queue(gl, 1);
         spin_unlock(&gl->gl_spin);
 
@@ -1328,6 +1322,36 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
         gfs2_glock_put(gl);
 }
 
+/**
+ * gfs2_should_freeze - Figure out if glock should be frozen
+ * @gl: The glock in question
+ *
+ * Glocks are not frozen if (a) the result of the dlm operation is
+ * an error, (b) the locking operation was an unlock operation or
+ * (c) if there is a "noexp" flagged request anywhere in the queue
+ *
+ * Returns: 1 if freezing should occur, 0 otherwise
+ */
+
+static int gfs2_should_freeze(const struct gfs2_glock *gl)
+{
+        const struct gfs2_holder *gh;
+
+        if (gl->gl_reply & ~LM_OUT_ST_MASK)
+                return 0;
+        if (gl->gl_target == LM_ST_UNLOCKED)
+                return 0;
+
+        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
+                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
+                        continue;
+                if (LM_FLAG_NOEXP & gh->gh_flags)
+                        return 0;
+        }
+
+        return 1;
+}
+
 /**
  * gfs2_glock_complete - Callback used by locking
  * @gl: Pointer to the glock
@@ -1338,19 +1362,18 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
 void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 {
         struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
+
         gl->gl_reply = ret;
+
         if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
-                struct gfs2_holder *gh;
                 spin_lock(&gl->gl_spin);
-                gh = find_first_waiter(gl);
-                if ((!(gh && (gh->gh_flags & LM_FLAG_NOEXP)) &&
-                     (gl->gl_target != LM_ST_UNLOCKED)) ||
-                    ((ret & ~LM_OUT_ST_MASK) != 0))
+                if (gfs2_should_freeze(gl)) {
                         set_bit(GLF_FROZEN, &gl->gl_flags);
-                spin_unlock(&gl->gl_spin);
-                if (test_bit(GLF_FROZEN, &gl->gl_flags))
+                        spin_unlock(&gl->gl_spin);
                         return;
+                }
+                spin_unlock(&gl->gl_spin);
         }
         set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
         gfs2_glock_hold(gl);
         if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
@@ -460,6 +460,7 @@ enum {
         SDF_NOBARRIERS = 3,
         SDF_NORECOVERY = 4,
         SDF_DEMOTE = 5,
+        SDF_NOJOURNALID = 6,
 };
 
 #define GFS2_FSNAME_LEN 256
@@ -76,7 +76,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 
         sb->s_fs_info = sdp;
         sdp->sd_vfs = sb;
-
+        set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
         gfs2_tune_init(&sdp->sd_tune);
 
         init_waitqueue_head(&sdp->sd_glock_wait);
@@ -1050,6 +1050,7 @@ static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
                         ret = match_int(&tmp[0], &option);
                         if (ret || option < 0)
                                 goto hostdata_error;
-                        ls->ls_jid = option;
+                        if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags))
+                                ls->ls_jid = option;
                         break;
                 case Opt_id:
@@ -1102,6 +1103,24 @@ void gfs2_lm_unmount(struct gfs2_sbd *sdp)
                 lm->lm_unmount(sdp);
 }
 
+static int gfs2_journalid_wait(void *word)
+{
+        if (signal_pending(current))
+                return -EINTR;
+        schedule();
+        return 0;
+}
+
+static int wait_on_journal(struct gfs2_sbd *sdp)
+{
+        if (sdp->sd_args.ar_spectator)
+                return 0;
+        if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
+                return 0;
+
+        return wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID, gfs2_journalid_wait, TASK_INTERRUPTIBLE);
+}
+
 void gfs2_online_uevent(struct gfs2_sbd *sdp)
 {
         struct super_block *sb = sdp->sd_vfs;
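wait_on_journal() parks the mount thread on the SDF_NOJOURNALID bit until a journal id arrives; the sysfs jid_store() handler added further down clears that bit and wakes the sleeper. A reduced sketch of the pairing, assuming the wait_on_bit()/wake_up_bit() contract of this kernel version; the lines themselves are taken from the two hunks, just placed side by side:

    /* mount side: sleep until the id is supplied, or a signal arrives */
    error = wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID,
                        gfs2_journalid_wait, TASK_INTERRUPTIBLE);

    /* sysfs side (jid_store): claim the bit, publish the id, wake the sleeper */
    if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
            return -EBUSY;          /* id was already set */
    sdp->sd_lockstruct.ls_jid = jid;
    smp_mb__after_clear_bit();
    wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);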
@@ -1194,6 +1213,10 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
         if (error)
                 goto fail_locking;
 
+        error = wait_on_journal(sdp);
+        if (error)
+                goto fail_sb;
+
         error = init_inodes(sdp, DO);
         if (error)
                 goto fail_sb;
@@ -787,15 +787,9 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
                 goto out;
 
         for (x = 0; x < num_qd; x++) {
-                int alloc_required;
-
                 offset = qd2offset(qda[x]);
-                error = gfs2_write_alloc_required(ip, offset,
-                                                  sizeof(struct gfs2_quota),
-                                                  &alloc_required);
-                if (error)
-                        goto out_gunlock;
-                if (alloc_required)
+                if (gfs2_write_alloc_required(ip, offset,
+                                              sizeof(struct gfs2_quota)))
                         nalloc++;
         }
 
@@ -1584,10 +1578,7 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
                 goto out_i;
 
         offset = qd2offset(qd);
-        error = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota),
-                                          &alloc_required);
-        if (error)
-                goto out_i;
+        alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
         if (alloc_required) {
                 al = gfs2_alloc_get(ip);
                 if (al == NULL)
@@ -342,8 +342,6 @@ int gfs2_jdesc_check(struct gfs2_jdesc *jd)
 {
         struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
         struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
-        int ar;
-        int error;
 
         if (ip->i_disksize < (8 << 20) || ip->i_disksize > (1 << 30) ||
             (ip->i_disksize & (sdp->sd_sb.sb_bsize - 1))) {
@@ -352,13 +350,12 @@ int gfs2_jdesc_check(struct gfs2_jdesc *jd)
         }
         jd->jd_blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;
 
-        error = gfs2_write_alloc_required(ip, 0, ip->i_disksize, &ar);
-        if (!error && ar) {
+        if (gfs2_write_alloc_required(ip, 0, ip->i_disksize)) {
                 gfs2_consist_inode(ip);
-                error = -EIO;
+                return -EIO;
         }
 
-        return error;
+        return 0;
 }
 
 /**
@@ -325,6 +325,30 @@ static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf)
         return sprintf(buf, "%d\n", ls->ls_first);
 }
 
+static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
+{
+        unsigned first;
+        int rv;
+
+        rv = sscanf(buf, "%u", &first);
+        if (rv != 1 || first > 1)
+                return -EINVAL;
+        spin_lock(&sdp->sd_jindex_spin);
+        rv = -EBUSY;
+        if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
+                goto out;
+        rv = -EINVAL;
+        if (sdp->sd_args.ar_spectator)
+                goto out;
+        if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
+                goto out;
+        sdp->sd_lockstruct.ls_first = first;
+        rv = 0;
+out:
+        spin_unlock(&sdp->sd_jindex_spin);
+        return rv ? rv : len;
+}
+
 static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf)
 {
         struct lm_lockstruct *ls = &sdp->sd_lockstruct;
@@ -377,14 +401,41 @@ static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf)
         return sprintf(buf, "%u\n", sdp->sd_lockstruct.ls_jid);
 }
 
+static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
+{
+        unsigned jid;
+        int rv;
+
+        rv = sscanf(buf, "%u", &jid);
+        if (rv != 1)
+                return -EINVAL;
+
+        spin_lock(&sdp->sd_jindex_spin);
+        rv = -EINVAL;
+        if (sdp->sd_args.ar_spectator)
+                goto out;
+        if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
+                goto out;
+        rv = -EBUSY;
+        if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
+                goto out;
+        sdp->sd_lockstruct.ls_jid = jid;
+        smp_mb__after_clear_bit();
+        wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
+        rv = 0;
+out:
+        spin_unlock(&sdp->sd_jindex_spin);
+        return rv ? rv : len;
+}
+
 #define GDLM_ATTR(_name,_mode,_show,_store) \
 static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store)
 
 GDLM_ATTR(proto_name, 0444, proto_name_show, NULL);
 GDLM_ATTR(block, 0644, block_show, block_store);
 GDLM_ATTR(withdraw, 0644, withdraw_show, withdraw_store);
-GDLM_ATTR(jid, 0444, jid_show, NULL);
-GDLM_ATTR(first, 0444, lkfirst_show, NULL);
+GDLM_ATTR(jid, 0644, jid_show, jid_store);
+GDLM_ATTR(first, 0644, lkfirst_show, lkfirst_store);
 GDLM_ATTR(first_done, 0444, first_done_show, NULL);
 GDLM_ATTR(recover, 0600, NULL, recover_store);
 GDLM_ATTR(recover_done, 0444, recover_done_show, NULL);
@@ -564,7 +615,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
 
         add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
         add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
-        if (!sdp->sd_args.ar_spectator)
+        if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags))
                 add_uevent_var(env, "JOURNALID=%u", sdp->sd_lockstruct.ls_jid);
         if (gfs2_uuid_valid(uuid))
                 add_uevent_var(env, "UUID=%pUB", uuid);