mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 20:22:09 +00:00
Merge branch 'xfs-extent-list-locking-fixes' into for-next
A set of fixes that ensures we take the ilock whenever accessing the extent list. The underlying problem manifested as "Access to block zero" messages and could result in extent list corruption.
This commit is contained in:
commit
bf3964c188
@ -1217,7 +1217,7 @@ __xfs_get_blocks(
|
||||
lockmode = XFS_ILOCK_EXCL;
|
||||
xfs_ilock(ip, lockmode);
|
||||
} else {
|
||||
lockmode = xfs_ilock_map_shared(ip);
|
||||
lockmode = xfs_ilock_data_map_shared(ip);
|
||||
}
|
||||
|
||||
ASSERT(offset <= mp->m_super->s_maxbytes);
|
||||
|
@ -164,6 +164,7 @@ xfs_attr_get(
|
||||
{
|
||||
int error;
|
||||
struct xfs_name xname;
|
||||
uint lock_mode;
|
||||
|
||||
XFS_STATS_INC(xs_attr_get);
|
||||
|
||||
@ -174,9 +175,9 @@ xfs_attr_get(
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
xfs_ilock(ip, XFS_ILOCK_SHARED);
|
||||
lock_mode = xfs_ilock_attr_map_shared(ip);
|
||||
error = xfs_attr_get_int(ip, &xname, value, valuelenp, flags);
|
||||
xfs_iunlock(ip, XFS_ILOCK_SHARED);
|
||||
xfs_iunlock(ip, lock_mode);
|
||||
return(error);
|
||||
}
|
||||
|
||||
|
@ -507,17 +507,17 @@ xfs_attr_list_int(
|
||||
{
|
||||
int error;
|
||||
xfs_inode_t *dp = context->dp;
|
||||
uint lock_mode;
|
||||
|
||||
XFS_STATS_INC(xs_attr_list);
|
||||
|
||||
if (XFS_FORCED_SHUTDOWN(dp->i_mount))
|
||||
return EIO;
|
||||
|
||||
xfs_ilock(dp, XFS_ILOCK_SHARED);
|
||||
|
||||
/*
|
||||
* Decide on what work routines to call based on the inode size.
|
||||
*/
|
||||
lock_mode = xfs_ilock_attr_map_shared(dp);
|
||||
if (!xfs_inode_hasattr(dp)) {
|
||||
error = 0;
|
||||
} else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
|
||||
@ -527,9 +527,7 @@ xfs_attr_list_int(
|
||||
} else {
|
||||
error = xfs_attr_node_list(context);
|
||||
}
|
||||
|
||||
xfs_iunlock(dp, XFS_ILOCK_SHARED);
|
||||
|
||||
xfs_iunlock(dp, lock_mode);
|
||||
return error;
|
||||
}
|
||||
|
||||
|
@ -4013,6 +4013,7 @@ xfs_bmapi_read(
|
||||
ASSERT(*nmap >= 1);
|
||||
ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
|
||||
XFS_BMAPI_IGSTATE)));
|
||||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
|
||||
|
||||
if (unlikely(XFS_TEST_ERROR(
|
||||
(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
|
||||
@ -4207,6 +4208,7 @@ xfs_bmapi_delay(
|
||||
ASSERT(*nmap >= 1);
|
||||
ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
|
||||
ASSERT(!(flags & ~XFS_BMAPI_ENTIRE));
|
||||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
||||
|
||||
if (unlikely(XFS_TEST_ERROR(
|
||||
(XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
|
||||
@ -4500,6 +4502,7 @@ xfs_bmapi_write(
|
||||
ASSERT(tp != NULL);
|
||||
ASSERT(len > 0);
|
||||
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
|
||||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
||||
|
||||
if (unlikely(XFS_TEST_ERROR(
|
||||
(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
|
||||
@ -5051,6 +5054,7 @@ xfs_bunmapi(
|
||||
if (XFS_FORCED_SHUTDOWN(mp))
|
||||
return XFS_ERROR(EIO);
|
||||
|
||||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
||||
ASSERT(len > 0);
|
||||
ASSERT(nexts >= 0);
|
||||
|
||||
|
@ -618,22 +618,27 @@ xfs_getbmap(
|
||||
return XFS_ERROR(ENOMEM);
|
||||
|
||||
xfs_ilock(ip, XFS_IOLOCK_SHARED);
|
||||
if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) {
|
||||
if (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size) {
|
||||
if (whichfork == XFS_DATA_FORK) {
|
||||
if (!(iflags & BMV_IF_DELALLOC) &&
|
||||
(ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
|
||||
error = -filemap_write_and_wait(VFS_I(ip)->i_mapping);
|
||||
if (error)
|
||||
goto out_unlock_iolock;
|
||||
}
|
||||
/*
|
||||
* even after flushing the inode, there can still be delalloc
|
||||
* blocks on the inode beyond EOF due to speculative
|
||||
* preallocation. These are not removed until the release
|
||||
* function is called or the inode is inactivated. Hence we
|
||||
* cannot assert here that ip->i_delayed_blks == 0.
|
||||
*/
|
||||
}
|
||||
|
||||
lock = xfs_ilock_map_shared(ip);
|
||||
/*
|
||||
* Even after flushing the inode, there can still be
|
||||
* delalloc blocks on the inode beyond EOF due to
|
||||
* speculative preallocation. These are not removed
|
||||
* until the release function is called or the inode
|
||||
* is inactivated. Hence we cannot assert here that
|
||||
* ip->i_delayed_blks == 0.
|
||||
*/
|
||||
}
|
||||
|
||||
lock = xfs_ilock_data_map_shared(ip);
|
||||
} else {
|
||||
lock = xfs_ilock_attr_map_shared(ip);
|
||||
}
|
||||
|
||||
/*
|
||||
* Don't let nex be bigger than the number of extents
|
||||
@ -738,7 +743,7 @@ xfs_getbmap(
|
||||
out_free_map:
|
||||
kmem_free(map);
|
||||
out_unlock_ilock:
|
||||
xfs_iunlock_map_shared(ip, lock);
|
||||
xfs_iunlock(ip, lock);
|
||||
out_unlock_iolock:
|
||||
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
|
||||
|
||||
@ -1169,9 +1174,15 @@ xfs_zero_remaining_bytes(
|
||||
xfs_buf_unlock(bp);
|
||||
|
||||
for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
|
||||
uint lock_mode;
|
||||
|
||||
offset_fsb = XFS_B_TO_FSBT(mp, offset);
|
||||
nimap = 1;
|
||||
|
||||
lock_mode = xfs_ilock_data_map_shared(ip);
|
||||
error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
|
||||
xfs_iunlock(ip, lock_mode);
|
||||
|
||||
if (error || nimap < 1)
|
||||
break;
|
||||
ASSERT(imap.br_blockcount >= 1);
|
||||
|
@ -674,6 +674,7 @@ xfs_readdir(
|
||||
{
|
||||
int rval; /* return value */
|
||||
int v; /* type-checking value */
|
||||
uint lock_mode;
|
||||
|
||||
trace_xfs_readdir(dp);
|
||||
|
||||
@ -683,6 +684,7 @@ xfs_readdir(
|
||||
ASSERT(S_ISDIR(dp->i_d.di_mode));
|
||||
XFS_STATS_INC(xs_dir_getdents);
|
||||
|
||||
lock_mode = xfs_ilock_data_map_shared(dp);
|
||||
if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
|
||||
rval = xfs_dir2_sf_getdents(dp, ctx);
|
||||
else if ((rval = xfs_dir2_isblock(NULL, dp, &v)))
|
||||
@ -691,5 +693,7 @@ xfs_readdir(
|
||||
rval = xfs_dir2_block_getdents(dp, ctx);
|
||||
else
|
||||
rval = xfs_dir2_leaf_getdents(dp, ctx, bufsize);
|
||||
xfs_iunlock(dp, lock_mode);
|
||||
|
||||
return rval;
|
||||
}
|
||||
|
@ -469,16 +469,17 @@ xfs_qm_dqtobp(
|
||||
struct xfs_mount *mp = dqp->q_mount;
|
||||
xfs_dqid_t id = be32_to_cpu(dqp->q_core.d_id);
|
||||
struct xfs_trans *tp = (tpp ? *tpp : NULL);
|
||||
uint lock_mode;
|
||||
|
||||
dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
|
||||
|
||||
xfs_ilock(quotip, XFS_ILOCK_SHARED);
|
||||
lock_mode = xfs_ilock_data_map_shared(quotip);
|
||||
if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
|
||||
/*
|
||||
* Return if this type of quotas is turned off while we
|
||||
* didn't have the quota inode lock.
|
||||
*/
|
||||
xfs_iunlock(quotip, XFS_ILOCK_SHARED);
|
||||
xfs_iunlock(quotip, lock_mode);
|
||||
return ESRCH;
|
||||
}
|
||||
|
||||
@ -488,7 +489,7 @@ xfs_qm_dqtobp(
|
||||
error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
|
||||
XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
|
||||
|
||||
xfs_iunlock(quotip, XFS_ILOCK_SHARED);
|
||||
xfs_iunlock(quotip, lock_mode);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
@ -912,7 +912,7 @@ xfs_dir_open(
|
||||
* If there are any blocks, read-ahead block 0 as we're almost
|
||||
* certain to have the next operation be a read there.
|
||||
*/
|
||||
mode = xfs_ilock_map_shared(ip);
|
||||
mode = xfs_ilock_data_map_shared(ip);
|
||||
if (ip->i_d.di_nextents > 0)
|
||||
xfs_dir3_data_readahead(NULL, ip, 0, -1);
|
||||
xfs_iunlock(ip, mode);
|
||||
@ -1215,7 +1215,7 @@ xfs_seek_data(
|
||||
uint lock;
|
||||
int error;
|
||||
|
||||
lock = xfs_ilock_map_shared(ip);
|
||||
lock = xfs_ilock_data_map_shared(ip);
|
||||
|
||||
isize = i_size_read(inode);
|
||||
if (start >= isize) {
|
||||
@ -1294,7 +1294,7 @@ out:
|
||||
offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
|
||||
|
||||
out_unlock:
|
||||
xfs_iunlock_map_shared(ip, lock);
|
||||
xfs_iunlock(ip, lock);
|
||||
|
||||
if (error)
|
||||
return -error;
|
||||
@ -1319,7 +1319,7 @@ xfs_seek_hole(
|
||||
if (XFS_FORCED_SHUTDOWN(mp))
|
||||
return -XFS_ERROR(EIO);
|
||||
|
||||
lock = xfs_ilock_map_shared(ip);
|
||||
lock = xfs_ilock_data_map_shared(ip);
|
||||
|
||||
isize = i_size_read(inode);
|
||||
if (start >= isize) {
|
||||
@ -1402,7 +1402,7 @@ out:
|
||||
offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
|
||||
|
||||
out_unlock:
|
||||
xfs_iunlock_map_shared(ip, lock);
|
||||
xfs_iunlock(ip, lock);
|
||||
|
||||
if (error)
|
||||
return -error;
|
||||
|
@ -77,48 +77,44 @@ xfs_get_extsz_hint(
|
||||
}
|
||||
|
||||
/*
|
||||
* This is a wrapper routine around the xfs_ilock() routine used to centralize
|
||||
* some grungy code. It is used in places that wish to lock the inode solely
|
||||
* for reading the extents. The reason these places can't just call
|
||||
* xfs_ilock(SHARED) is that the inode lock also guards to bringing in of the
|
||||
* extents from disk for a file in b-tree format. If the inode is in b-tree
|
||||
* format, then we need to lock the inode exclusively until the extents are read
|
||||
* in. Locking it exclusively all the time would limit our parallelism
|
||||
* unnecessarily, though. What we do instead is check to see if the extents
|
||||
* have been read in yet, and only lock the inode exclusively if they have not.
|
||||
* These two are wrapper routines around the xfs_ilock() routine used to
|
||||
* centralize some grungy code. They are used in places that wish to lock the
|
||||
* inode solely for reading the extents. The reason these places can't just
|
||||
* call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards to
|
||||
* bringing in of the extents from disk for a file in b-tree format. If the
|
||||
* inode is in b-tree format, then we need to lock the inode exclusively until
|
||||
* the extents are read in. Locking it exclusively all the time would limit
|
||||
* our parallelism unnecessarily, though. What we do instead is check to see
|
||||
* if the extents have been read in yet, and only lock the inode exclusively
|
||||
* if they have not.
|
||||
*
|
||||
* The function returns a value which should be given to the corresponding
|
||||
* xfs_iunlock_map_shared(). This value is the mode in which the lock was
|
||||
* actually taken.
|
||||
* The functions return a value which should be given to the corresponding
|
||||
* xfs_iunlock() call.
|
||||
*/
|
||||
uint
|
||||
xfs_ilock_map_shared(
|
||||
xfs_inode_t *ip)
|
||||
xfs_ilock_data_map_shared(
|
||||
struct xfs_inode *ip)
|
||||
{
|
||||
uint lock_mode;
|
||||
uint lock_mode = XFS_ILOCK_SHARED;
|
||||
|
||||
if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
|
||||
((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
|
||||
if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
|
||||
(ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
|
||||
lock_mode = XFS_ILOCK_EXCL;
|
||||
} else {
|
||||
lock_mode = XFS_ILOCK_SHARED;
|
||||
}
|
||||
|
||||
xfs_ilock(ip, lock_mode);
|
||||
|
||||
return lock_mode;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is simply the unlock routine to go with xfs_ilock_map_shared().
|
||||
* All it does is call xfs_iunlock() with the given lock_mode.
|
||||
*/
|
||||
void
|
||||
xfs_iunlock_map_shared(
|
||||
xfs_inode_t *ip,
|
||||
unsigned int lock_mode)
|
||||
uint
|
||||
xfs_ilock_attr_map_shared(
|
||||
struct xfs_inode *ip)
|
||||
{
|
||||
xfs_iunlock(ip, lock_mode);
|
||||
uint lock_mode = XFS_ILOCK_SHARED;
|
||||
|
||||
if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE &&
|
||||
(ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
|
||||
lock_mode = XFS_ILOCK_EXCL;
|
||||
xfs_ilock(ip, lock_mode);
|
||||
return lock_mode;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -588,9 +584,9 @@ xfs_lookup(
|
||||
if (XFS_FORCED_SHUTDOWN(dp->i_mount))
|
||||
return XFS_ERROR(EIO);
|
||||
|
||||
lock_mode = xfs_ilock_map_shared(dp);
|
||||
lock_mode = xfs_ilock_data_map_shared(dp);
|
||||
error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
|
||||
xfs_iunlock_map_shared(dp, lock_mode);
|
||||
xfs_iunlock(dp, lock_mode);
|
||||
|
||||
if (error)
|
||||
goto out;
|
||||
|
@ -337,8 +337,8 @@ int xfs_ilock_nowait(xfs_inode_t *, uint);
|
||||
void xfs_iunlock(xfs_inode_t *, uint);
|
||||
void xfs_ilock_demote(xfs_inode_t *, uint);
|
||||
int xfs_isilocked(xfs_inode_t *, uint);
|
||||
uint xfs_ilock_map_shared(xfs_inode_t *);
|
||||
void xfs_iunlock_map_shared(xfs_inode_t *, uint);
|
||||
uint xfs_ilock_data_map_shared(struct xfs_inode *);
|
||||
uint xfs_ilock_attr_map_shared(struct xfs_inode *);
|
||||
int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, umode_t,
|
||||
xfs_nlink_t, xfs_dev_t, prid_t, int,
|
||||
struct xfs_buf **, xfs_inode_t **);
|
||||
|
@ -431,6 +431,8 @@ xfs_iread_extents(
|
||||
xfs_ifork_t *ifp;
|
||||
xfs_extnum_t nextents;
|
||||
|
||||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
||||
|
||||
if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
|
||||
XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
|
||||
ip->i_mount);
|
||||
|
@ -112,15 +112,11 @@ xfs_find_handle(
|
||||
memset(&handle.ha_fid, 0, sizeof(handle.ha_fid));
|
||||
hsize = sizeof(xfs_fsid_t);
|
||||
} else {
|
||||
int lock_mode;
|
||||
|
||||
lock_mode = xfs_ilock_map_shared(ip);
|
||||
handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
|
||||
sizeof(handle.ha_fid.fid_len);
|
||||
handle.ha_fid.fid_pad = 0;
|
||||
handle.ha_fid.fid_gen = ip->i_d.di_gen;
|
||||
handle.ha_fid.fid_ino = ip->i_ino;
|
||||
xfs_iunlock_map_shared(ip, lock_mode);
|
||||
|
||||
hsize = XFS_HSIZE(handle);
|
||||
}
|
||||
|
@ -1222,16 +1222,18 @@ xfs_qm_dqiterate(
|
||||
lblkno = 0;
|
||||
maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
|
||||
do {
|
||||
uint lock_mode;
|
||||
|
||||
nmaps = XFS_DQITER_MAP_SIZE;
|
||||
/*
|
||||
* We aren't changing the inode itself. Just changing
|
||||
* some of its data. No new blocks are added here, and
|
||||
* the inode is never added to the transaction.
|
||||
*/
|
||||
xfs_ilock(qip, XFS_ILOCK_SHARED);
|
||||
lock_mode = xfs_ilock_data_map_shared(qip);
|
||||
error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
|
||||
map, &nmaps, 0);
|
||||
xfs_iunlock(qip, XFS_ILOCK_SHARED);
|
||||
xfs_iunlock(qip, lock_mode);
|
||||
if (error)
|
||||
break;
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user