xfs: update for v3.14-rc1
Merge tag 'xfs-for-linus-v3.14-rc1' of git://oss.sgi.com/xfs/xfs
Pull xfs update from Ben Myers:
 "This is primarily bug fixes, many of which you already have.  New
  stuff includes a series to decouple the in-memory and on-disk log
  format, helpers in the area of inode clusters, and i_version handling.
  We decided to try to use more topic branches this release, so there
  are some merge commits in there on account of that.  I'm afraid I
  didn't do a good job of putting meaningful comments in the first
  couple of merges.  Sorry about that.  I think I have the hang of it
  now.
  For 3.14-rc1 there are fixes in the areas of remote attributes,
  discard, growfs, memory leaks in recovery, directory v2, quotas, the
  MAINTAINERS file, allocation alignment, extent list locking, and in
  xfs_bmapi_allocate.  There are cleanups in xfs_setsize_buftarg,
  removing unused macros, quotas, setattr, and freeing of inode
   clusters.  The in-memory and on-disk log formats have been decoupled, a
  common helper to calculate the number of blocks in an inode cluster
  has been added, and handling of i_version has been pulled into the
  filesystems that use it.
   - cleanup in xfs_setsize_buftarg
   - removal of remaining unused flags for vop toss/flush/flushinval
   - fix for memory corruption in xfs_attrlist_by_handle
   - fix for out-of-date comment in xfs_trans_dqlockedjoin
   - fix for discard if range length is less than one block
   - fix for overrun of agfl buffer using growfs on v4 superblock
     filesystems
   - pull i_version handling out into the filesystems that use it
   - don't leak recovery items on error
   - fix for memory leak in xfs_dir2_node_removename
   - several cleanups for quotas
   - fix bad assertion in xfs_qm_vop_create_dqattach
   - cleanup for xfs_setattr_mode, and add xfs_setattr_time
   - fix quota assert in xfs_setattr_nonsize
   - fix an infinite loop when turning off group/project quota before
     user quota
   - fix for temporary buffer allocation failure in xfs_dir2_block_to_sf
     with large directory block sizes
   - fix Dave's email address in MAINTAINERS
   - cleanup calculation of freed inode cluster blocks
   - fix alignment of initial file allocations to match filesystem
     geometry
   - decouple in-memory and on-disk log format
   - introduce a common helper to calculate the number of filesystem
     blocks in an inode cluster
   - fixes for extent list locking
   - fix for off-by-one in xfs_attr3_rmt_verify
   - fix for missing destroy_work_on_stack in xfs_bmapi_allocate"
* tag 'xfs-for-linus-v3.14-rc1' of git://oss.sgi.com/xfs/xfs: (51 commits)
  xfs: Calling destroy_work_on_stack() to pair with INIT_WORK_ONSTACK()
  xfs: fix off-by-one error in xfs_attr3_rmt_verify
  xfs: assert that we hold the ilock for extent map access
  xfs: use xfs_ilock_attr_map_shared in xfs_attr_list_int
  xfs: use xfs_ilock_attr_map_shared in xfs_attr_get
  xfs: use xfs_ilock_data_map_shared in xfs_qm_dqiterate
  xfs: use xfs_ilock_data_map_shared in xfs_qm_dqtobp
  xfs: take the ilock around xfs_bmapi_read in xfs_zero_remaining_bytes
  xfs: reinstate the ilock in xfs_readdir
  xfs: add xfs_ilock_attr_map_shared
  xfs: rename xfs_ilock_map_shared
  xfs: remove xfs_iunlock_map_shared
  xfs: no need to lock the inode in xfs_find_handle
  xfs: use xfs_icluster_size_fsb in xfs_imap
  xfs: use xfs_icluster_size_fsb in xfs_ifree_cluster
  xfs: use xfs_icluster_size_fsb in xfs_ialloc_inode_init
  xfs: use xfs_icluster_size_fsb in xfs_bulkstat
  xfs: introduce a common helper xfs_icluster_size_fsb
  xfs: get rid of XFS_IALLOC_BLOCKS macros
  xfs: get rid of XFS_INODE_CLUSTER_SIZE macros
  ...
			
			
commit 1d32bdafaa
				| @ -202,11 +202,6 @@ int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **de | ||||
| 			return -EPERM; | ||||
| 	} | ||||
| 
 | ||||
| 	if ((ia_valid & ATTR_SIZE) && IS_I_VERSION(inode)) { | ||||
| 		if (attr->ia_size != inode->i_size) | ||||
| 			inode_inc_iversion(inode); | ||||
| 	} | ||||
| 
 | ||||
| 	if ((ia_valid & ATTR_MODE)) { | ||||
| 		umode_t amode = attr->ia_mode; | ||||
| 		/* Flag setting protected by i_mutex */ | ||||
|  | ||||
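Editor's note: the notify_change() hunk above removes the size-change i_version bump from the generic VFS path; the btrfs and ext4 hunks that follow show each filesystem taking that bump over in its own setattr/setsize code. A minimal sketch of the pattern, assuming the usual VFS helpers; the wrapper name example_setattr_size() is hypothetical and only the two-line check is taken from the hunks below:

#include <linux/fs.h>

/* Sketch only: the size-change i_version bump now lives in the filesystem's
 * own setattr path rather than in notify_change().  IS_I_VERSION() and
 * inode_inc_iversion() are the helpers used in the ext4 hunk below;
 * example_setattr_size() is a hypothetical wrapper, not real code.
 */
static void example_setattr_size(struct inode *inode, struct iattr *attr)
{
	if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
		inode_inc_iversion(inode);
	/* ... filesystem-specific truncate work continues here ... */
}
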
| @ -4354,8 +4354,12 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr) | ||||
| 	 * these flags set.  For all other operations the VFS set these flags | ||||
| 	 * explicitly if it wants a timestamp update. | ||||
| 	 */ | ||||
| 	if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME)))) | ||||
| 		inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb); | ||||
| 	if (newsize != oldsize) { | ||||
| 		inode_inc_iversion(inode); | ||||
| 		if (!(mask & (ATTR_CTIME | ATTR_MTIME))) | ||||
| 			inode->i_ctime = inode->i_mtime = | ||||
| 				current_fs_time(inode->i_sb); | ||||
| 	} | ||||
| 
 | ||||
| 	if (newsize > oldsize) { | ||||
| 		truncate_pagecache(inode, newsize); | ||||
|  | ||||
| @ -4586,6 +4586,10 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) | ||||
| 			if (attr->ia_size > sbi->s_bitmap_maxbytes) | ||||
| 				return -EFBIG; | ||||
| 		} | ||||
| 
 | ||||
| 		if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size) | ||||
| 			inode_inc_iversion(inode); | ||||
| 
 | ||||
| 		if (S_ISREG(inode->i_mode) && | ||||
| 		    (attr->ia_size < inode->i_size)) { | ||||
| 			if (ext4_should_order_data(inode)) { | ||||
|  | ||||
| @ -1217,7 +1217,7 @@ __xfs_get_blocks( | ||||
| 		lockmode = XFS_ILOCK_EXCL; | ||||
| 		xfs_ilock(ip, lockmode); | ||||
| 	} else { | ||||
| 		lockmode = xfs_ilock_map_shared(ip); | ||||
| 		lockmode = xfs_ilock_data_map_shared(ip); | ||||
| 	} | ||||
| 
 | ||||
| 	ASSERT(offset <= mp->m_super->s_maxbytes); | ||||
|  | ||||
| @ -164,6 +164,7 @@ xfs_attr_get( | ||||
| { | ||||
| 	int		error; | ||||
| 	struct xfs_name	xname; | ||||
| 	uint		lock_mode; | ||||
| 
 | ||||
| 	XFS_STATS_INC(xs_attr_get); | ||||
| 
 | ||||
| @ -174,9 +175,9 @@ xfs_attr_get( | ||||
| 	if (error) | ||||
| 		return error; | ||||
| 
 | ||||
| 	xfs_ilock(ip, XFS_ILOCK_SHARED); | ||||
| 	lock_mode = xfs_ilock_attr_map_shared(ip); | ||||
| 	error = xfs_attr_get_int(ip, &xname, value, valuelenp, flags); | ||||
| 	xfs_iunlock(ip, XFS_ILOCK_SHARED); | ||||
| 	xfs_iunlock(ip, lock_mode); | ||||
| 	return(error); | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -507,17 +507,17 @@ xfs_attr_list_int( | ||||
| { | ||||
| 	int error; | ||||
| 	xfs_inode_t *dp = context->dp; | ||||
| 	uint		lock_mode; | ||||
| 
 | ||||
| 	XFS_STATS_INC(xs_attr_list); | ||||
| 
 | ||||
| 	if (XFS_FORCED_SHUTDOWN(dp->i_mount)) | ||||
| 		return EIO; | ||||
| 
 | ||||
| 	xfs_ilock(dp, XFS_ILOCK_SHARED); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Decide on what work routines to call based on the inode size. | ||||
| 	 */ | ||||
| 	lock_mode = xfs_ilock_attr_map_shared(dp); | ||||
| 	if (!xfs_inode_hasattr(dp)) { | ||||
| 		error = 0; | ||||
| 	} else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { | ||||
| @ -527,9 +527,7 @@ xfs_attr_list_int( | ||||
| 	} else { | ||||
| 		error = xfs_attr_node_list(context); | ||||
| 	} | ||||
| 
 | ||||
| 	xfs_iunlock(dp, XFS_ILOCK_SHARED); | ||||
| 
 | ||||
| 	xfs_iunlock(dp, lock_mode); | ||||
| 	return error; | ||||
| } | ||||
| 
 | ||||
|  | ||||
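Editor's note: the xfs_attr_get() and xfs_attr_list_int() hunks above replace the hard-coded XFS_ILOCK_SHARED with the new xfs_ilock_attr_map_shared() helper (its definition appears in the xfs_inode.c hunk further down); unlocking goes back to plain xfs_iunlock(). Condensed from the xfs_attr_get() hunk, with a hypothetical wrapper name; not a complete function:

/* Sketch condensed from the hunk above; example_attr_get() is illustrative. */
STATIC int
example_attr_get(struct xfs_inode *ip, struct xfs_name *xname,
		 unsigned char *value, int *valuelenp, int flags)
{
	uint	lock_mode;
	int	error;

	/* shared unless the attr fork extents still need reading in */
	lock_mode = xfs_ilock_attr_map_shared(ip);
	error = xfs_attr_get_int(ip, xname, value, valuelenp, flags);
	xfs_iunlock(ip, lock_mode);	/* plain unlock, no special helper */
	return error;
}
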
| @ -4013,6 +4013,7 @@ xfs_bmapi_read( | ||||
| 	ASSERT(*nmap >= 1); | ||||
| 	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE| | ||||
| 			   XFS_BMAPI_IGSTATE))); | ||||
| 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)); | ||||
| 
 | ||||
| 	if (unlikely(XFS_TEST_ERROR( | ||||
| 	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && | ||||
| @ -4207,6 +4208,7 @@ xfs_bmapi_delay( | ||||
| 	ASSERT(*nmap >= 1); | ||||
| 	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP); | ||||
| 	ASSERT(!(flags & ~XFS_BMAPI_ENTIRE)); | ||||
| 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | ||||
| 
 | ||||
| 	if (unlikely(XFS_TEST_ERROR( | ||||
| 	    (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS && | ||||
| @ -4500,6 +4502,7 @@ xfs_bmapi_write( | ||||
| 	ASSERT(tp != NULL); | ||||
| 	ASSERT(len > 0); | ||||
| 	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL); | ||||
| 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | ||||
| 
 | ||||
| 	if (unlikely(XFS_TEST_ERROR( | ||||
| 	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && | ||||
| @ -5051,6 +5054,7 @@ xfs_bunmapi( | ||||
| 	if (XFS_FORCED_SHUTDOWN(mp)) | ||||
| 		return XFS_ERROR(EIO); | ||||
| 
 | ||||
| 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | ||||
| 	ASSERT(len > 0); | ||||
| 	ASSERT(nexts >= 0); | ||||
| 
 | ||||
|  | ||||
| @ -618,22 +618,27 @@ xfs_getbmap( | ||||
| 		return XFS_ERROR(ENOMEM); | ||||
| 
 | ||||
| 	xfs_ilock(ip, XFS_IOLOCK_SHARED); | ||||
| 	if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) { | ||||
| 		if (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size) { | ||||
| 	if (whichfork == XFS_DATA_FORK) { | ||||
| 		if (!(iflags & BMV_IF_DELALLOC) && | ||||
| 		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) { | ||||
| 			error = -filemap_write_and_wait(VFS_I(ip)->i_mapping); | ||||
| 			if (error) | ||||
| 				goto out_unlock_iolock; | ||||
| 		} | ||||
| 		/*
 | ||||
| 		 * even after flushing the inode, there can still be delalloc | ||||
| 		 * blocks on the inode beyond EOF due to speculative | ||||
| 		 * preallocation. These are not removed until the release | ||||
| 		 * function is called or the inode is inactivated. Hence we | ||||
| 		 * cannot assert here that ip->i_delayed_blks == 0. | ||||
| 		 */ | ||||
| 	} | ||||
| 
 | ||||
| 	lock = xfs_ilock_map_shared(ip); | ||||
| 			/*
 | ||||
| 			 * Even after flushing the inode, there can still be | ||||
| 			 * delalloc blocks on the inode beyond EOF due to | ||||
| 			 * speculative preallocation.  These are not removed | ||||
| 			 * until the release function is called or the inode | ||||
| 			 * is inactivated.  Hence we cannot assert here that | ||||
| 			 * ip->i_delayed_blks == 0. | ||||
| 			 */ | ||||
| 		} | ||||
| 
 | ||||
| 		lock = xfs_ilock_data_map_shared(ip); | ||||
| 	} else { | ||||
| 		lock = xfs_ilock_attr_map_shared(ip); | ||||
| 	} | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Don't let nex be bigger than the number of extents | ||||
| @ -738,7 +743,7 @@ xfs_getbmap( | ||||
|  out_free_map: | ||||
| 	kmem_free(map); | ||||
|  out_unlock_ilock: | ||||
| 	xfs_iunlock_map_shared(ip, lock); | ||||
| 	xfs_iunlock(ip, lock); | ||||
|  out_unlock_iolock: | ||||
| 	xfs_iunlock(ip, XFS_IOLOCK_SHARED); | ||||
| 
 | ||||
| @ -1169,9 +1174,15 @@ xfs_zero_remaining_bytes( | ||||
| 	xfs_buf_unlock(bp); | ||||
| 
 | ||||
| 	for (offset = startoff; offset <= endoff; offset = lastoffset + 1) { | ||||
| 		uint lock_mode; | ||||
| 
 | ||||
| 		offset_fsb = XFS_B_TO_FSBT(mp, offset); | ||||
| 		nimap = 1; | ||||
| 
 | ||||
| 		lock_mode = xfs_ilock_data_map_shared(ip); | ||||
| 		error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0); | ||||
| 		xfs_iunlock(ip, lock_mode); | ||||
| 
 | ||||
| 		if (error || nimap < 1) | ||||
| 			break; | ||||
| 		ASSERT(imap.br_blockcount >= 1); | ||||
|  | ||||
| @ -1593,12 +1593,11 @@ xfs_free_buftarg( | ||||
| 	kmem_free(btp); | ||||
| } | ||||
| 
 | ||||
| STATIC int | ||||
| xfs_setsize_buftarg_flags( | ||||
| int | ||||
| xfs_setsize_buftarg( | ||||
| 	xfs_buftarg_t		*btp, | ||||
| 	unsigned int		blocksize, | ||||
| 	unsigned int		sectorsize, | ||||
| 	int			verbose) | ||||
| 	unsigned int		sectorsize) | ||||
| { | ||||
| 	btp->bt_bsize = blocksize; | ||||
| 	btp->bt_sshift = ffs(sectorsize) - 1; | ||||
| @ -1619,26 +1618,17 @@ xfs_setsize_buftarg_flags( | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  *	When allocating the initial buffer target we have not yet | ||||
|  *	read in the superblock, so don't know what sized sectors | ||||
|  *	are being used at this early stage.  Play safe. | ||||
|  * When allocating the initial buffer target we have not yet | ||||
|  * read in the superblock, so don't know what sized sectors | ||||
|  * are being used at this early stage.  Play safe. | ||||
|  */ | ||||
| STATIC int | ||||
| xfs_setsize_buftarg_early( | ||||
| 	xfs_buftarg_t		*btp, | ||||
| 	struct block_device	*bdev) | ||||
| { | ||||
| 	return xfs_setsize_buftarg_flags(btp, | ||||
| 			PAGE_SIZE, bdev_logical_block_size(bdev), 0); | ||||
| } | ||||
| 
 | ||||
| int | ||||
| xfs_setsize_buftarg( | ||||
| 	xfs_buftarg_t		*btp, | ||||
| 	unsigned int		blocksize, | ||||
| 	unsigned int		sectorsize) | ||||
| { | ||||
| 	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1); | ||||
| 	return xfs_setsize_buftarg(btp, PAGE_SIZE, | ||||
| 				   bdev_logical_block_size(bdev)); | ||||
| } | ||||
| 
 | ||||
| xfs_buftarg_t * | ||||
|  | ||||
| @ -182,21 +182,47 @@ xfs_buf_item_size( | ||||
| 	trace_xfs_buf_item_size(bip); | ||||
| } | ||||
| 
 | ||||
| static struct xfs_log_iovec * | ||||
| static inline void | ||||
| xfs_buf_item_copy_iovec( | ||||
| 	struct xfs_log_vec	*lv, | ||||
| 	struct xfs_log_iovec	**vecp, | ||||
| 	struct xfs_buf		*bp, | ||||
| 	uint			offset, | ||||
| 	int			first_bit, | ||||
| 	uint			nbits) | ||||
| { | ||||
| 	offset += first_bit * XFS_BLF_CHUNK; | ||||
| 	xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK, | ||||
| 			xfs_buf_offset(bp, offset), | ||||
| 			nbits * XFS_BLF_CHUNK); | ||||
| } | ||||
| 
 | ||||
| static inline bool | ||||
| xfs_buf_item_straddle( | ||||
| 	struct xfs_buf		*bp, | ||||
| 	uint			offset, | ||||
| 	int			next_bit, | ||||
| 	int			last_bit) | ||||
| { | ||||
| 	return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) != | ||||
| 		(xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) + | ||||
| 		 XFS_BLF_CHUNK); | ||||
| } | ||||
| 
 | ||||
| static void | ||||
| xfs_buf_item_format_segment( | ||||
| 	struct xfs_buf_log_item	*bip, | ||||
| 	struct xfs_log_iovec	*vecp, | ||||
| 	struct xfs_log_vec	*lv, | ||||
| 	struct xfs_log_iovec	**vecp, | ||||
| 	uint			offset, | ||||
| 	struct xfs_buf_log_format *blfp) | ||||
| { | ||||
| 	struct xfs_buf	*bp = bip->bli_buf; | ||||
| 	uint		base_size; | ||||
| 	uint		nvecs; | ||||
| 	int		first_bit; | ||||
| 	int		last_bit; | ||||
| 	int		next_bit; | ||||
| 	uint		nbits; | ||||
| 	uint		buffer_offset; | ||||
| 
 | ||||
| 	/* copy the flags across from the base format item */ | ||||
| 	blfp->blf_flags = bip->__bli_format.blf_flags; | ||||
| @ -208,21 +234,17 @@ xfs_buf_item_format_segment( | ||||
| 	 */ | ||||
| 	base_size = xfs_buf_log_format_size(blfp); | ||||
| 
 | ||||
| 	nvecs = 0; | ||||
| 	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0); | ||||
| 	if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) { | ||||
| 		/*
 | ||||
| 		 * If the map is not be dirty in the transaction, mark | ||||
| 		 * the size as zero and do not advance the vector pointer. | ||||
| 		 */ | ||||
| 		goto out; | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	vecp->i_addr = blfp; | ||||
| 	vecp->i_len = base_size; | ||||
| 	vecp->i_type = XLOG_REG_TYPE_BFORMAT; | ||||
| 	vecp++; | ||||
| 	nvecs = 1; | ||||
| 	blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size); | ||||
| 	blfp->blf_size = 1; | ||||
| 
 | ||||
| 	if (bip->bli_flags & XFS_BLI_STALE) { | ||||
| 		/*
 | ||||
| @ -232,14 +254,13 @@ xfs_buf_item_format_segment( | ||||
| 		 */ | ||||
| 		trace_xfs_buf_item_format_stale(bip); | ||||
| 		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL); | ||||
| 		goto out; | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Fill in an iovec for each set of contiguous chunks. | ||||
| 	 */ | ||||
| 
 | ||||
| 	last_bit = first_bit; | ||||
| 	nbits = 1; | ||||
| 	for (;;) { | ||||
| @ -252,42 +273,22 @@ xfs_buf_item_format_segment( | ||||
| 		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, | ||||
| 					(uint)last_bit + 1); | ||||
| 		/*
 | ||||
| 		 * If we run out of bits fill in the last iovec and get | ||||
| 		 * out of the loop. | ||||
| 		 * Else if we start a new set of bits then fill in the | ||||
| 		 * iovec for the series we were looking at and start | ||||
| 		 * counting the bits in the new one. | ||||
| 		 * Else we're still in the same set of bits so just | ||||
| 		 * keep counting and scanning. | ||||
| 		 * If we run out of bits fill in the last iovec and get out of | ||||
| 		 * the loop.  Else if we start a new set of bits then fill in | ||||
| 		 * the iovec for the series we were looking at and start | ||||
| 		 * counting the bits in the new one.  Else we're still in the | ||||
| 		 * same set of bits so just keep counting and scanning. | ||||
| 		 */ | ||||
| 		if (next_bit == -1) { | ||||
| 			buffer_offset = offset + first_bit * XFS_BLF_CHUNK; | ||||
| 			vecp->i_addr = xfs_buf_offset(bp, buffer_offset); | ||||
| 			vecp->i_len = nbits * XFS_BLF_CHUNK; | ||||
| 			vecp->i_type = XLOG_REG_TYPE_BCHUNK; | ||||
| 			nvecs++; | ||||
| 			xfs_buf_item_copy_iovec(lv, vecp, bp, offset, | ||||
| 						first_bit, nbits); | ||||
| 			blfp->blf_size++; | ||||
| 			break; | ||||
| 		} else if (next_bit != last_bit + 1) { | ||||
| 			buffer_offset = offset + first_bit * XFS_BLF_CHUNK; | ||||
| 			vecp->i_addr = xfs_buf_offset(bp, buffer_offset); | ||||
| 			vecp->i_len = nbits * XFS_BLF_CHUNK; | ||||
| 			vecp->i_type = XLOG_REG_TYPE_BCHUNK; | ||||
| 			nvecs++; | ||||
| 			vecp++; | ||||
| 			first_bit = next_bit; | ||||
| 			last_bit = next_bit; | ||||
| 			nbits = 1; | ||||
| 		} else if (xfs_buf_offset(bp, offset + | ||||
| 					      (next_bit << XFS_BLF_SHIFT)) != | ||||
| 			   (xfs_buf_offset(bp, offset + | ||||
| 					       (last_bit << XFS_BLF_SHIFT)) + | ||||
| 			    XFS_BLF_CHUNK)) { | ||||
| 			buffer_offset = offset + first_bit * XFS_BLF_CHUNK; | ||||
| 			vecp->i_addr = xfs_buf_offset(bp, buffer_offset); | ||||
| 			vecp->i_len = nbits * XFS_BLF_CHUNK; | ||||
| 			vecp->i_type = XLOG_REG_TYPE_BCHUNK; | ||||
| 			nvecs++; | ||||
| 			vecp++; | ||||
| 		} else if (next_bit != last_bit + 1 || | ||||
| 		           xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) { | ||||
| 			xfs_buf_item_copy_iovec(lv, vecp, bp, offset, | ||||
| 						first_bit, nbits); | ||||
| 			blfp->blf_size++; | ||||
| 			first_bit = next_bit; | ||||
| 			last_bit = next_bit; | ||||
| 			nbits = 1; | ||||
| @ -296,9 +297,6 @@ xfs_buf_item_format_segment( | ||||
| 			nbits++; | ||||
| 		} | ||||
| 	} | ||||
| out: | ||||
| 	blfp->blf_size = nvecs; | ||||
| 	return vecp; | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
| @ -310,10 +308,11 @@ out: | ||||
| STATIC void | ||||
| xfs_buf_item_format( | ||||
| 	struct xfs_log_item	*lip, | ||||
| 	struct xfs_log_iovec	*vecp) | ||||
| 	struct xfs_log_vec	*lv) | ||||
| { | ||||
| 	struct xfs_buf_log_item	*bip = BUF_ITEM(lip); | ||||
| 	struct xfs_buf		*bp = bip->bli_buf; | ||||
| 	struct xfs_log_iovec	*vecp = NULL; | ||||
| 	uint			offset = 0; | ||||
| 	int			i; | ||||
| 
 | ||||
| @ -354,8 +353,8 @@ xfs_buf_item_format( | ||||
| 	} | ||||
| 
 | ||||
| 	for (i = 0; i < bip->bli_format_count; i++) { | ||||
| 		vecp = xfs_buf_item_format_segment(bip, vecp, offset, | ||||
| 						&bip->bli_formats[i]); | ||||
| 		xfs_buf_item_format_segment(bip, lv, &vecp, offset, | ||||
| 					    &bip->bli_formats[i]); | ||||
| 		offset += bp->b_maps[i].bm_len; | ||||
| 	} | ||||
| 
 | ||||
|  | ||||
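Editor's note: the xfs_buf_item_format() changes above are part of decoupling the in-memory log vectors from the on-disk format: instead of writing directly into a caller-supplied xfs_log_iovec array, the item now asks for space in a struct xfs_log_vec via xlog_prepare_iovec()/xlog_copy_iovec()/xlog_finish_iovec() (the dquot, quotaoff, EFI/EFD and icreate items below are converted the same way). The helpers themselves are not shown in this diff; the following is a rough sketch of how they plausibly behave, inferred from the call sites. Field names such as lv_iovecp, lv_buf and lv_buf_len are assumptions, and the real code likely also handles alignment and rounding:

/* Sketch, not the actual xfs_log.h code: illustrates the calling convention
 * used above.  xlog_prepare_iovec() hands back a formatting buffer and
 * records the region type, xlog_finish_iovec() fixes up the length once the
 * caller has filled it in, and xlog_copy_iovec() is the memcpy shortcut.
 */
static inline void *
xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		   uint type)
{
	struct xfs_log_iovec *vec = *vecp ? *vecp + 1 : lv->lv_iovecp;

	vec->i_type = type;
	vec->i_addr = lv->lv_buf + lv->lv_buf_len;	/* assumed fields */
	*vecp = vec;
	return vec->i_addr;
}

static inline void
xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec, int len)
{
	lv->lv_buf_len += len;		/* real code presumably rounds this up */
	vec->i_len = len;
}

static inline void *
xlog_copy_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type, void *data, int len)
{
	void *buf = xlog_prepare_iovec(lv, vecp, type);

	memcpy(buf, data, len);
	xlog_finish_iovec(lv, *vecp, len);
	return buf;
}
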
| @ -674,6 +674,7 @@ xfs_readdir( | ||||
| { | ||||
| 	int		rval;		/* return value */ | ||||
| 	int		v;		/* type-checking value */ | ||||
| 	uint		lock_mode; | ||||
| 
 | ||||
| 	trace_xfs_readdir(dp); | ||||
| 
 | ||||
| @ -683,6 +684,7 @@ xfs_readdir( | ||||
| 	ASSERT(S_ISDIR(dp->i_d.di_mode)); | ||||
| 	XFS_STATS_INC(xs_dir_getdents); | ||||
| 
 | ||||
| 	lock_mode = xfs_ilock_data_map_shared(dp); | ||||
| 	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) | ||||
| 		rval = xfs_dir2_sf_getdents(dp, ctx); | ||||
| 	else if ((rval = xfs_dir2_isblock(NULL, dp, &v))) | ||||
| @ -691,5 +693,7 @@ xfs_readdir( | ||||
| 		rval = xfs_dir2_block_getdents(dp, ctx); | ||||
| 	else | ||||
| 		rval = xfs_dir2_leaf_getdents(dp, ctx, bufsize); | ||||
| 	xfs_iunlock(dp, lock_mode); | ||||
| 
 | ||||
| 	return rval; | ||||
| } | ||||
|  | ||||
| @ -170,6 +170,7 @@ xfs_dir2_block_to_sf( | ||||
| 	char			*ptr;		/* current data pointer */ | ||||
| 	xfs_dir2_sf_entry_t	*sfep;		/* shortform entry */ | ||||
| 	xfs_dir2_sf_hdr_t	*sfp;		/* shortform directory header */ | ||||
| 	xfs_dir2_sf_hdr_t	*dst;		/* temporary data buffer */ | ||||
| 
 | ||||
| 	trace_xfs_dir2_block_to_sf(args); | ||||
| 
 | ||||
| @ -177,35 +178,20 @@ xfs_dir2_block_to_sf( | ||||
| 	mp = dp->i_mount; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Make a copy of the block data, so we can shrink the inode | ||||
| 	 * and add local data. | ||||
| 	 * allocate a temporary destination buffer the size of the inode | ||||
| 	 * to format the data into. Once we have formatted the data, we | ||||
| 	 * can free the block and copy the formatted data into the inode literal | ||||
| 	 * area. | ||||
| 	 */ | ||||
| 	hdr = kmem_alloc(mp->m_dirblksize, KM_SLEEP); | ||||
| 	memcpy(hdr, bp->b_addr, mp->m_dirblksize); | ||||
| 	logflags = XFS_ILOG_CORE; | ||||
| 	if ((error = xfs_dir2_shrink_inode(args, mp->m_dirdatablk, bp))) { | ||||
| 		ASSERT(error != ENOSPC); | ||||
| 		goto out; | ||||
| 	} | ||||
| 	dst = kmem_alloc(mp->m_sb.sb_inodesize, KM_SLEEP); | ||||
| 	hdr = bp->b_addr; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * The buffer is now unconditionally gone, whether | ||||
| 	 * xfs_dir2_shrink_inode worked or not. | ||||
| 	 * | ||||
| 	 * Convert the inode to local format. | ||||
| 	 */ | ||||
| 	dp->i_df.if_flags &= ~XFS_IFEXTENTS; | ||||
| 	dp->i_df.if_flags |= XFS_IFINLINE; | ||||
| 	dp->i_d.di_format = XFS_DINODE_FMT_LOCAL; | ||||
| 	ASSERT(dp->i_df.if_bytes == 0); | ||||
| 	xfs_idata_realloc(dp, size, XFS_DATA_FORK); | ||||
| 	logflags |= XFS_ILOG_DDATA; | ||||
| 	/*
 | ||||
| 	 * Copy the header into the newly allocate local space. | ||||
| 	 */ | ||||
| 	sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; | ||||
| 	sfp = (xfs_dir2_sf_hdr_t *)dst; | ||||
| 	memcpy(sfp, sfhp, xfs_dir2_sf_hdr_size(sfhp->i8count)); | ||||
| 	dp->i_d.di_size = size; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Set up to loop over the block's entries. | ||||
| 	 */ | ||||
| @ -258,10 +244,34 @@ xfs_dir2_block_to_sf( | ||||
| 		ptr += dp->d_ops->data_entsize(dep->namelen); | ||||
| 	} | ||||
| 	ASSERT((char *)sfep - (char *)sfp == size); | ||||
| 
 | ||||
| 	/* now we are done with the block, we can shrink the inode */ | ||||
| 	logflags = XFS_ILOG_CORE; | ||||
| 	error = xfs_dir2_shrink_inode(args, mp->m_dirdatablk, bp); | ||||
| 	if (error) { | ||||
| 		ASSERT(error != ENOSPC); | ||||
| 		goto out; | ||||
| 	} | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * The buffer is now unconditionally gone, whether | ||||
| 	 * xfs_dir2_shrink_inode worked or not. | ||||
| 	 * | ||||
| 	 * Convert the inode to local format and copy the data in. | ||||
| 	 */ | ||||
| 	dp->i_df.if_flags &= ~XFS_IFEXTENTS; | ||||
| 	dp->i_df.if_flags |= XFS_IFINLINE; | ||||
| 	dp->i_d.di_format = XFS_DINODE_FMT_LOCAL; | ||||
| 	ASSERT(dp->i_df.if_bytes == 0); | ||||
| 	xfs_idata_realloc(dp, size, XFS_DATA_FORK); | ||||
| 
 | ||||
| 	logflags |= XFS_ILOG_DDATA; | ||||
| 	memcpy(dp->i_df.if_u1.if_data, dst, size); | ||||
| 	dp->i_d.di_size = size; | ||||
| 	xfs_dir2_sf_check(args); | ||||
| out: | ||||
| 	xfs_trans_log_inode(args->trans, dp, logflags); | ||||
| 	kmem_free(hdr); | ||||
| 	kmem_free(dst); | ||||
| 	return error; | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -469,16 +469,17 @@ xfs_qm_dqtobp( | ||||
| 	struct xfs_mount	*mp = dqp->q_mount; | ||||
| 	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id); | ||||
| 	struct xfs_trans	*tp = (tpp ? *tpp : NULL); | ||||
| 	uint			lock_mode; | ||||
| 
 | ||||
| 	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk; | ||||
| 
 | ||||
| 	xfs_ilock(quotip, XFS_ILOCK_SHARED); | ||||
| 	lock_mode = xfs_ilock_data_map_shared(quotip); | ||||
| 	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) { | ||||
| 		/*
 | ||||
| 		 * Return if this type of quotas is turned off while we | ||||
| 		 * didn't have the quota inode lock. | ||||
| 		 */ | ||||
| 		xfs_iunlock(quotip, XFS_ILOCK_SHARED); | ||||
| 		xfs_iunlock(quotip, lock_mode); | ||||
| 		return ESRCH; | ||||
| 	} | ||||
| 
 | ||||
| @ -488,7 +489,7 @@ xfs_qm_dqtobp( | ||||
| 	error = xfs_bmapi_read(quotip, dqp->q_fileoffset, | ||||
| 			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0); | ||||
| 
 | ||||
| 	xfs_iunlock(quotip, XFS_ILOCK_SHARED); | ||||
| 	xfs_iunlock(quotip, lock_mode); | ||||
| 	if (error) | ||||
| 		return error; | ||||
| 
 | ||||
|  | ||||
| @ -57,20 +57,24 @@ xfs_qm_dquot_logitem_size( | ||||
| STATIC void | ||||
| xfs_qm_dquot_logitem_format( | ||||
| 	struct xfs_log_item	*lip, | ||||
| 	struct xfs_log_iovec	*logvec) | ||||
| 	struct xfs_log_vec	*lv) | ||||
| { | ||||
| 	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip); | ||||
| 	struct xfs_log_iovec	*vecp = NULL; | ||||
| 	struct xfs_dq_logformat	*qlf; | ||||
| 
 | ||||
| 	logvec->i_addr = &qlip->qli_format; | ||||
| 	logvec->i_len  = sizeof(xfs_dq_logformat_t); | ||||
| 	logvec->i_type = XLOG_REG_TYPE_QFORMAT; | ||||
| 	logvec++; | ||||
| 	logvec->i_addr = &qlip->qli_dquot->q_core; | ||||
| 	logvec->i_len  = sizeof(xfs_disk_dquot_t); | ||||
| 	logvec->i_type = XLOG_REG_TYPE_DQUOT; | ||||
| 
 | ||||
| 	qlip->qli_format.qlf_size = 2; | ||||
| 	qlf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_QFORMAT); | ||||
| 	qlf->qlf_type = XFS_LI_DQUOT; | ||||
| 	qlf->qlf_size = 2; | ||||
| 	qlf->qlf_id = be32_to_cpu(qlip->qli_dquot->q_core.d_id); | ||||
| 	qlf->qlf_blkno = qlip->qli_dquot->q_blkno; | ||||
| 	qlf->qlf_len = 1; | ||||
| 	qlf->qlf_boffset = qlip->qli_dquot->q_bufoffset; | ||||
| 	xlog_finish_iovec(lv, vecp, sizeof(struct xfs_dq_logformat)); | ||||
| 
 | ||||
| 	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_DQUOT, | ||||
| 			&qlip->qli_dquot->q_core, | ||||
| 			sizeof(struct xfs_disk_dquot)); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
| @ -257,18 +261,6 @@ xfs_qm_dquot_logitem_init( | ||||
| 	xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT, | ||||
| 					&xfs_dquot_item_ops); | ||||
| 	lp->qli_dquot = dqp; | ||||
| 	lp->qli_format.qlf_type = XFS_LI_DQUOT; | ||||
| 	lp->qli_format.qlf_id = be32_to_cpu(dqp->q_core.d_id); | ||||
| 	lp->qli_format.qlf_blkno = dqp->q_blkno; | ||||
| 	lp->qli_format.qlf_len = 1; | ||||
| 	/*
 | ||||
| 	 * This is just the offset of this dquot within its buffer | ||||
| 	 * (which is currently 1 FSB and probably won't change). | ||||
| 	 * Hence 32 bits for this offset should be just fine. | ||||
| 	 * Alternatively, we can store (bufoffset / sizeof(xfs_dqblk_t)) | ||||
| 	 * here, and recompute it at recovery time. | ||||
| 	 */ | ||||
| 	lp->qli_format.qlf_boffset = (__uint32_t)dqp->q_bufoffset; | ||||
| } | ||||
| 
 | ||||
| /*------------------  QUOTAOFF LOG ITEMS  -------------------*/ | ||||
| @ -294,26 +286,20 @@ xfs_qm_qoff_logitem_size( | ||||
| 	*nbytes += sizeof(struct xfs_qoff_logitem); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * This is called to fill in the vector of log iovecs for the | ||||
|  * given quotaoff log item. We use only 1 iovec, and we point that | ||||
|  * at the quotaoff_log_format structure embedded in the quotaoff item. | ||||
|  * It is at this point that we assert that all of the extent | ||||
|  * slots in the quotaoff item have been filled. | ||||
|  */ | ||||
| STATIC void | ||||
| xfs_qm_qoff_logitem_format( | ||||
| 	struct xfs_log_item	*lip, | ||||
| 	struct xfs_log_iovec	*log_vector) | ||||
| 	struct xfs_log_vec	*lv) | ||||
| { | ||||
| 	struct xfs_qoff_logitem	*qflip = QOFF_ITEM(lip); | ||||
| 	struct xfs_log_iovec	*vecp = NULL; | ||||
| 	struct xfs_qoff_logformat *qlf; | ||||
| 
 | ||||
| 	ASSERT(qflip->qql_format.qf_type == XFS_LI_QUOTAOFF); | ||||
| 
 | ||||
| 	log_vector->i_addr = &qflip->qql_format; | ||||
| 	log_vector->i_len = sizeof(xfs_qoff_logitem_t); | ||||
| 	log_vector->i_type = XLOG_REG_TYPE_QUOTAOFF; | ||||
| 	qflip->qql_format.qf_size = 1; | ||||
| 	qlf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_QUOTAOFF); | ||||
| 	qlf->qf_type = XFS_LI_QUOTAOFF; | ||||
| 	qlf->qf_size = 1; | ||||
| 	qlf->qf_flags = qflip->qql_flags; | ||||
| 	xlog_finish_iovec(lv, vecp, sizeof(struct xfs_qoff_logitem)); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
| @ -453,8 +439,7 @@ xfs_qm_qoff_logitem_init( | ||||
| 	xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ? | ||||
| 			&xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops); | ||||
| 	qf->qql_item.li_mountp = mp; | ||||
| 	qf->qql_format.qf_type = XFS_LI_QUOTAOFF; | ||||
| 	qf->qql_format.qf_flags = flags; | ||||
| 	qf->qql_start_lip = start; | ||||
| 	qf->qql_flags = flags; | ||||
| 	return qf; | ||||
| } | ||||
|  | ||||
| @ -27,13 +27,12 @@ typedef struct xfs_dq_logitem { | ||||
| 	xfs_log_item_t		 qli_item;	   /* common portion */ | ||||
| 	struct xfs_dquot	*qli_dquot;	   /* dquot ptr */ | ||||
| 	xfs_lsn_t		 qli_flush_lsn;	   /* lsn at last flush */ | ||||
| 	xfs_dq_logformat_t	 qli_format;	   /* logged structure */ | ||||
| } xfs_dq_logitem_t; | ||||
| 
 | ||||
| typedef struct xfs_qoff_logitem { | ||||
| 	xfs_log_item_t		 qql_item;	/* common portion */ | ||||
| 	struct xfs_qoff_logitem *qql_start_lip; /* qoff-start logitem, if any */ | ||||
| 	xfs_qoff_logformat_t	 qql_format;	/* logged structure */ | ||||
| 	unsigned int		qql_flags; | ||||
| } xfs_qoff_logitem_t; | ||||
| 
 | ||||
| 
 | ||||
|  | ||||
| @ -26,6 +26,7 @@ | ||||
| #include "xfs_trans_priv.h" | ||||
| #include "xfs_buf_item.h" | ||||
| #include "xfs_extfree_item.h" | ||||
| #include "xfs_log.h" | ||||
| 
 | ||||
| 
 | ||||
| kmem_zone_t	*xfs_efi_zone; | ||||
| @ -101,9 +102,10 @@ xfs_efi_item_size( | ||||
| STATIC void | ||||
| xfs_efi_item_format( | ||||
| 	struct xfs_log_item	*lip, | ||||
| 	struct xfs_log_iovec	*log_vector) | ||||
| 	struct xfs_log_vec	*lv) | ||||
| { | ||||
| 	struct xfs_efi_log_item	*efip = EFI_ITEM(lip); | ||||
| 	struct xfs_log_iovec	*vecp = NULL; | ||||
| 
 | ||||
| 	ASSERT(atomic_read(&efip->efi_next_extent) == | ||||
| 				efip->efi_format.efi_nextents); | ||||
| @ -111,10 +113,9 @@ xfs_efi_item_format( | ||||
| 	efip->efi_format.efi_type = XFS_LI_EFI; | ||||
| 	efip->efi_format.efi_size = 1; | ||||
| 
 | ||||
| 	log_vector->i_addr = &efip->efi_format; | ||||
| 	log_vector->i_len = xfs_efi_item_sizeof(efip); | ||||
| 	log_vector->i_type = XLOG_REG_TYPE_EFI_FORMAT; | ||||
| 	ASSERT(log_vector->i_len >= sizeof(xfs_efi_log_format_t)); | ||||
| 	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_EFI_FORMAT, | ||||
| 			&efip->efi_format, | ||||
| 			xfs_efi_item_sizeof(efip)); | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| @ -368,19 +369,19 @@ xfs_efd_item_size( | ||||
| STATIC void | ||||
| xfs_efd_item_format( | ||||
| 	struct xfs_log_item	*lip, | ||||
| 	struct xfs_log_iovec	*log_vector) | ||||
| 	struct xfs_log_vec	*lv) | ||||
| { | ||||
| 	struct xfs_efd_log_item	*efdp = EFD_ITEM(lip); | ||||
| 	struct xfs_log_iovec	*vecp = NULL; | ||||
| 
 | ||||
| 	ASSERT(efdp->efd_next_extent == efdp->efd_format.efd_nextents); | ||||
| 
 | ||||
| 	efdp->efd_format.efd_type = XFS_LI_EFD; | ||||
| 	efdp->efd_format.efd_size = 1; | ||||
| 
 | ||||
| 	log_vector->i_addr = &efdp->efd_format; | ||||
| 	log_vector->i_len = xfs_efd_item_sizeof(efdp); | ||||
| 	log_vector->i_type = XLOG_REG_TYPE_EFD_FORMAT; | ||||
| 	ASSERT(log_vector->i_len >= sizeof(xfs_efd_log_format_t)); | ||||
| 	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_EFD_FORMAT, | ||||
| 			&efdp->efd_format, | ||||
| 			xfs_efd_item_sizeof(efdp)); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  | ||||
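Editor's note: the EFI/EFD conversion above, like the buffer, dquot and quotaoff items before it and the icreate and inode items further down, is mechanical, because the log item ->iop_format callback changed from receiving a pre-sized iovec array to receiving the log vector itself. A sketch of the ops-table change driving these per-item updates; treat the exact struct layout as an assumption, since this diff does not show xfs_trans.h:

/* Sketch of the callback signature change; other members elided. */
struct xfs_item_ops {
	void (*iop_size)(struct xfs_log_item *, int *nvecs, int *nbytes);
#if 0	/* old convention: item fills a caller-provided iovec array */
	void (*iop_format)(struct xfs_log_item *, struct xfs_log_iovec *);
#endif
	/* new convention: item formats itself into the log vector */
	void (*iop_format)(struct xfs_log_item *, struct xfs_log_vec *);
	/* ... remaining callbacks unchanged ... */
};
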
| @ -912,7 +912,7 @@ xfs_dir_open( | ||||
| 	 * If there are any blocks, read-ahead block 0 as we're almost | ||||
| 	 * certain to have the next operation be a read there. | ||||
| 	 */ | ||||
| 	mode = xfs_ilock_map_shared(ip); | ||||
| 	mode = xfs_ilock_data_map_shared(ip); | ||||
| 	if (ip->i_d.di_nextents > 0) | ||||
| 		xfs_dir3_data_readahead(NULL, ip, 0, -1); | ||||
| 	xfs_iunlock(ip, mode); | ||||
| @ -1215,7 +1215,7 @@ xfs_seek_data( | ||||
| 	uint			lock; | ||||
| 	int			error; | ||||
| 
 | ||||
| 	lock = xfs_ilock_map_shared(ip); | ||||
| 	lock = xfs_ilock_data_map_shared(ip); | ||||
| 
 | ||||
| 	isize = i_size_read(inode); | ||||
| 	if (start >= isize) { | ||||
| @ -1294,7 +1294,7 @@ out: | ||||
| 	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); | ||||
| 
 | ||||
| out_unlock: | ||||
| 	xfs_iunlock_map_shared(ip, lock); | ||||
| 	xfs_iunlock(ip, lock); | ||||
| 
 | ||||
| 	if (error) | ||||
| 		return -error; | ||||
| @ -1319,7 +1319,7 @@ xfs_seek_hole( | ||||
| 	if (XFS_FORCED_SHUTDOWN(mp)) | ||||
| 		return -XFS_ERROR(EIO); | ||||
| 
 | ||||
| 	lock = xfs_ilock_map_shared(ip); | ||||
| 	lock = xfs_ilock_data_map_shared(ip); | ||||
| 
 | ||||
| 	isize = i_size_read(inode); | ||||
| 	if (start >= isize) { | ||||
| @ -1402,7 +1402,7 @@ out: | ||||
| 	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); | ||||
| 
 | ||||
| out_unlock: | ||||
| 	xfs_iunlock_map_shared(ip, lock); | ||||
| 	xfs_iunlock(ip, lock); | ||||
| 
 | ||||
| 	if (error) | ||||
| 		return -error; | ||||
|  | ||||
| @ -52,7 +52,7 @@ xfs_ialloc_cluster_alignment( | ||||
| { | ||||
| 	if (xfs_sb_version_hasalign(&args->mp->m_sb) && | ||||
| 	    args->mp->m_sb.sb_inoalignmt >= | ||||
| 	     XFS_B_TO_FSBT(args->mp, XFS_INODE_CLUSTER_SIZE(args->mp))) | ||||
| 	     XFS_B_TO_FSBT(args->mp, args->mp->m_inode_cluster_size)) | ||||
| 		return args->mp->m_sb.sb_inoalignmt; | ||||
| 	return 1; | ||||
| } | ||||
| @ -170,27 +170,20 @@ xfs_ialloc_inode_init( | ||||
| { | ||||
| 	struct xfs_buf		*fbuf; | ||||
| 	struct xfs_dinode	*free; | ||||
| 	int			blks_per_cluster, nbufs, ninodes; | ||||
| 	int			nbufs, blks_per_cluster, inodes_per_cluster; | ||||
| 	int			version; | ||||
| 	int			i, j; | ||||
| 	xfs_daddr_t		d; | ||||
| 	xfs_ino_t		ino = 0; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Loop over the new block(s), filling in the inodes. | ||||
| 	 * For small block sizes, manipulate the inodes in buffers | ||||
| 	 * which are multiples of the blocks size. | ||||
| 	 * Loop over the new block(s), filling in the inodes.  For small block | ||||
| 	 * sizes, manipulate the inodes in buffers  which are multiples of the | ||||
| 	 * blocks size. | ||||
| 	 */ | ||||
| 	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) { | ||||
| 		blks_per_cluster = 1; | ||||
| 		nbufs = length; | ||||
| 		ninodes = mp->m_sb.sb_inopblock; | ||||
| 	} else { | ||||
| 		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) / | ||||
| 				   mp->m_sb.sb_blocksize; | ||||
| 		nbufs = length / blks_per_cluster; | ||||
| 		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock; | ||||
| 	} | ||||
| 	blks_per_cluster = xfs_icluster_size_fsb(mp); | ||||
| 	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog; | ||||
| 	nbufs = length / blks_per_cluster; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Figure out what version number to use in the inodes we create.  If | ||||
| @ -225,7 +218,7 @@ xfs_ialloc_inode_init( | ||||
| 		 * they track in the AIL as if they were physically logged. | ||||
| 		 */ | ||||
| 		if (tp) | ||||
| 			xfs_icreate_log(tp, agno, agbno, XFS_IALLOC_INODES(mp), | ||||
| 			xfs_icreate_log(tp, agno, agbno, mp->m_ialloc_inos, | ||||
| 					mp->m_sb.sb_inodesize, length, gen); | ||||
| 	} else if (xfs_sb_version_hasnlink(&mp->m_sb)) | ||||
| 		version = 2; | ||||
| @ -246,7 +239,7 @@ xfs_ialloc_inode_init( | ||||
| 		/* Initialize the inode buffers and log them appropriately. */ | ||||
| 		fbuf->b_ops = &xfs_inode_buf_ops; | ||||
| 		xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length)); | ||||
| 		for (i = 0; i < ninodes; i++) { | ||||
| 		for (i = 0; i < inodes_per_cluster; i++) { | ||||
| 			int	ioffset = i << mp->m_sb.sb_inodelog; | ||||
| 			uint	isize = xfs_dinode_size(version); | ||||
| 
 | ||||
| @ -329,11 +322,11 @@ xfs_ialloc_ag_alloc( | ||||
| 	 * Locking will ensure that we don't have two callers in here | ||||
| 	 * at one time. | ||||
| 	 */ | ||||
| 	newlen = XFS_IALLOC_INODES(args.mp); | ||||
| 	newlen = args.mp->m_ialloc_inos; | ||||
| 	if (args.mp->m_maxicount && | ||||
| 	    args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount) | ||||
| 		return XFS_ERROR(ENOSPC); | ||||
| 	args.minlen = args.maxlen = XFS_IALLOC_BLOCKS(args.mp); | ||||
| 	args.minlen = args.maxlen = args.mp->m_ialloc_blks; | ||||
| 	/*
 | ||||
| 	 * First try to allocate inodes contiguous with the last-allocated | ||||
| 	 * chunk of inodes.  If the filesystem is striped, this will fill | ||||
| @ -343,7 +336,7 @@ xfs_ialloc_ag_alloc( | ||||
| 	newino = be32_to_cpu(agi->agi_newino); | ||||
| 	agno = be32_to_cpu(agi->agi_seqno); | ||||
| 	args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) + | ||||
| 			XFS_IALLOC_BLOCKS(args.mp); | ||||
| 		     args.mp->m_ialloc_blks; | ||||
| 	if (likely(newino != NULLAGINO && | ||||
| 		  (args.agbno < be32_to_cpu(agi->agi_length)))) { | ||||
| 		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno); | ||||
| @ -585,7 +578,7 @@ xfs_ialloc_ag_select( | ||||
| 		 * Is there enough free space for the file plus a block of | ||||
| 		 * inodes? (if we need to allocate some)? | ||||
| 		 */ | ||||
| 		ineed = XFS_IALLOC_BLOCKS(mp); | ||||
| 		ineed = mp->m_ialloc_blks; | ||||
| 		longest = pag->pagf_longest; | ||||
| 		if (!longest) | ||||
| 			longest = pag->pagf_flcount > 0; | ||||
| @ -999,7 +992,7 @@ xfs_dialloc( | ||||
| 	 * inode. | ||||
| 	 */ | ||||
| 	if (mp->m_maxicount && | ||||
| 	    mp->m_sb.sb_icount + XFS_IALLOC_INODES(mp) > mp->m_maxicount) { | ||||
| 	    mp->m_sb.sb_icount + mp->m_ialloc_inos > mp->m_maxicount) { | ||||
| 		noroom = 1; | ||||
| 		okalloc = 0; | ||||
| 	} | ||||
| @ -1202,7 +1195,7 @@ xfs_difree( | ||||
| 	 * When an inode cluster is free, it becomes eligible for removal | ||||
| 	 */ | ||||
| 	if (!(mp->m_flags & XFS_MOUNT_IKEEP) && | ||||
| 	    (rec.ir_freecount == XFS_IALLOC_INODES(mp))) { | ||||
| 	    (rec.ir_freecount == mp->m_ialloc_inos)) { | ||||
| 
 | ||||
| 		*delete = 1; | ||||
| 		*first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino); | ||||
| @ -1212,7 +1205,7 @@ xfs_difree( | ||||
| 		 * AGI and Superblock inode counts, and mark the disk space | ||||
| 		 * to be freed when the transaction is committed. | ||||
| 		 */ | ||||
| 		ilen = XFS_IALLOC_INODES(mp); | ||||
| 		ilen = mp->m_ialloc_inos; | ||||
| 		be32_add_cpu(&agi->agi_count, -ilen); | ||||
| 		be32_add_cpu(&agi->agi_freecount, -(ilen - 1)); | ||||
| 		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT); | ||||
| @ -1228,9 +1221,9 @@ xfs_difree( | ||||
| 			goto error0; | ||||
| 		} | ||||
| 
 | ||||
| 		xfs_bmap_add_free(XFS_AGB_TO_FSB(mp, | ||||
| 				agno, XFS_INO_TO_AGBNO(mp,rec.ir_startino)), | ||||
| 				XFS_IALLOC_BLOCKS(mp), flist, mp); | ||||
| 		xfs_bmap_add_free(XFS_AGB_TO_FSB(mp, agno, | ||||
| 				  XFS_AGINO_TO_AGBNO(mp, rec.ir_startino)), | ||||
| 				  mp->m_ialloc_blks, flist, mp); | ||||
| 	} else { | ||||
| 		*delete = 0; | ||||
| 
 | ||||
| @ -1311,7 +1304,7 @@ xfs_imap_lookup( | ||||
| 
 | ||||
| 	/* check that the returned record contains the required inode */ | ||||
| 	if (rec.ir_startino > agino || | ||||
| 	    rec.ir_startino + XFS_IALLOC_INODES(mp) <= agino) | ||||
| 	    rec.ir_startino + mp->m_ialloc_inos <= agino) | ||||
| 		return EINVAL; | ||||
| 
 | ||||
| 	/* for untrusted inodes check it is allocated first */ | ||||
| @ -1384,7 +1377,7 @@ xfs_imap( | ||||
| 		return XFS_ERROR(EINVAL); | ||||
| 	} | ||||
| 
 | ||||
| 	blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog; | ||||
| 	blks_per_cluster = xfs_icluster_size_fsb(mp); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * For bulkstat and handle lookups, we have an untrusted inode number | ||||
| @ -1405,7 +1398,7 @@ xfs_imap( | ||||
| 	 * If the inode cluster size is the same as the blocksize or | ||||
| 	 * smaller we get to the buffer by simple arithmetics. | ||||
| 	 */ | ||||
| 	if (XFS_INODE_CLUSTER_SIZE(mp) <= mp->m_sb.sb_blocksize) { | ||||
| 	if (blks_per_cluster == 1) { | ||||
| 		offset = XFS_INO_TO_OFFSET(mp, ino); | ||||
| 		ASSERT(offset < mp->m_sb.sb_inopblock); | ||||
| 
 | ||||
|  | ||||
| @ -25,17 +25,18 @@ struct xfs_mount; | ||||
| struct xfs_trans; | ||||
| struct xfs_btree_cur; | ||||
| 
 | ||||
| /*
 | ||||
|  * Allocation parameters for inode allocation. | ||||
|  */ | ||||
| #define	XFS_IALLOC_INODES(mp)	(mp)->m_ialloc_inos | ||||
| #define	XFS_IALLOC_BLOCKS(mp)	(mp)->m_ialloc_blks | ||||
| 
 | ||||
| /*
 | ||||
|  * Move inodes in clusters of this size. | ||||
|  */ | ||||
| /* Move inodes in clusters of this size */ | ||||
| #define	XFS_INODE_BIG_CLUSTER_SIZE	8192 | ||||
| #define	XFS_INODE_CLUSTER_SIZE(mp)	(mp)->m_inode_cluster_size | ||||
| 
 | ||||
| /* Calculate and return the number of filesystem blocks per inode cluster */ | ||||
| static inline int | ||||
| xfs_icluster_size_fsb( | ||||
| 	struct xfs_mount	*mp) | ||||
| { | ||||
| 	if (mp->m_sb.sb_blocksize >= mp->m_inode_cluster_size) | ||||
| 		return 1; | ||||
| 	return mp->m_inode_cluster_size >> mp->m_sb.sb_blocklog; | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * Make an inode pointer out of the buffer/offset. | ||||
|  | ||||
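Editor's note: the new xfs_icluster_size_fsb() helper above replaces the open-coded blocksize-versus-cluster-size checks. The callers converted in this series (xfs_ialloc_inode_init and xfs_imap above, xfs_ifree_cluster further down, and xfs_bulkstat per the shortlog) reduce to the same few lines; this fragment is condensed from the xfs_ifree_cluster hunk, where the buffer count is taken from the inode chunk size, while other callers divide whichever extent length they are walking:

	int	blks_per_cluster, inodes_per_cluster, nbufs;

	blks_per_cluster   = xfs_icluster_size_fsb(mp);
	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
	nbufs              = mp->m_ialloc_blks / blks_per_cluster;
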
| @ -28,6 +28,7 @@ | ||||
| #include "xfs_trans_priv.h" | ||||
| #include "xfs_error.h" | ||||
| #include "xfs_icreate_item.h" | ||||
| #include "xfs_log.h" | ||||
| 
 | ||||
| kmem_zone_t	*xfs_icreate_zone;		/* inode create item zone */ | ||||
| 
 | ||||
| @ -58,13 +59,14 @@ xfs_icreate_item_size( | ||||
| STATIC void | ||||
| xfs_icreate_item_format( | ||||
| 	struct xfs_log_item	*lip, | ||||
| 	struct xfs_log_iovec	*log_vector) | ||||
| 	struct xfs_log_vec	*lv) | ||||
| { | ||||
| 	struct xfs_icreate_item	*icp = ICR_ITEM(lip); | ||||
| 	struct xfs_log_iovec	*vecp = NULL; | ||||
| 
 | ||||
| 	log_vector->i_addr = (xfs_caddr_t)&icp->ic_format; | ||||
| 	log_vector->i_len  = sizeof(struct xfs_icreate_log); | ||||
| 	log_vector->i_type = XLOG_REG_TYPE_ICREATE; | ||||
| 	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ICREATE, | ||||
| 			&icp->ic_format, | ||||
| 			sizeof(struct xfs_icreate_log)); | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
|  | ||||
| @ -77,48 +77,44 @@ xfs_get_extsz_hint( | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * This is a wrapper routine around the xfs_ilock() routine used to centralize | ||||
|  * some grungy code.  It is used in places that wish to lock the inode solely | ||||
|  * for reading the extents.  The reason these places can't just call | ||||
|  * xfs_ilock(SHARED) is that the inode lock also guards to bringing in of the | ||||
|  * extents from disk for a file in b-tree format.  If the inode is in b-tree | ||||
|  * format, then we need to lock the inode exclusively until the extents are read | ||||
|  * in.  Locking it exclusively all the time would limit our parallelism | ||||
|  * unnecessarily, though.  What we do instead is check to see if the extents | ||||
|  * have been read in yet, and only lock the inode exclusively if they have not. | ||||
|  * These two are wrapper routines around the xfs_ilock() routine used to | ||||
|  * centralize some grungy code.  They are used in places that wish to lock the | ||||
|  * inode solely for reading the extents.  The reason these places can't just | ||||
|  * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards to | ||||
|  * bringing in of the extents from disk for a file in b-tree format.  If the | ||||
|  * inode is in b-tree format, then we need to lock the inode exclusively until | ||||
|  * the extents are read in.  Locking it exclusively all the time would limit | ||||
|  * our parallelism unnecessarily, though.  What we do instead is check to see | ||||
|  * if the extents have been read in yet, and only lock the inode exclusively | ||||
|  * if they have not. | ||||
|  * | ||||
|  * The function returns a value which should be given to the corresponding | ||||
|  * xfs_iunlock_map_shared().  This value is the mode in which the lock was | ||||
|  * actually taken. | ||||
|  * The functions return a value which should be given to the corresponding | ||||
|  * xfs_iunlock() call. | ||||
|  */ | ||||
| uint | ||||
| xfs_ilock_map_shared( | ||||
| 	xfs_inode_t	*ip) | ||||
| xfs_ilock_data_map_shared( | ||||
| 	struct xfs_inode	*ip) | ||||
| { | ||||
| 	uint	lock_mode; | ||||
| 	uint			lock_mode = XFS_ILOCK_SHARED; | ||||
| 
 | ||||
| 	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) && | ||||
| 	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) { | ||||
| 	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE && | ||||
| 	    (ip->i_df.if_flags & XFS_IFEXTENTS) == 0) | ||||
| 		lock_mode = XFS_ILOCK_EXCL; | ||||
| 	} else { | ||||
| 		lock_mode = XFS_ILOCK_SHARED; | ||||
| 	} | ||||
| 
 | ||||
| 	xfs_ilock(ip, lock_mode); | ||||
| 
 | ||||
| 	return lock_mode; | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * This is simply the unlock routine to go with xfs_ilock_map_shared(). | ||||
|  * All it does is call xfs_iunlock() with the given lock_mode. | ||||
|  */ | ||||
| void | ||||
| xfs_iunlock_map_shared( | ||||
| 	xfs_inode_t	*ip, | ||||
| 	unsigned int	lock_mode) | ||||
| uint | ||||
| xfs_ilock_attr_map_shared( | ||||
| 	struct xfs_inode	*ip) | ||||
| { | ||||
| 	xfs_iunlock(ip, lock_mode); | ||||
| 	uint			lock_mode = XFS_ILOCK_SHARED; | ||||
| 
 | ||||
| 	if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE && | ||||
| 	    (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0) | ||||
| 		lock_mode = XFS_ILOCK_EXCL; | ||||
| 	xfs_ilock(ip, lock_mode); | ||||
| 	return lock_mode; | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
| @ -588,9 +584,9 @@ xfs_lookup( | ||||
| 	if (XFS_FORCED_SHUTDOWN(dp->i_mount)) | ||||
| 		return XFS_ERROR(EIO); | ||||
| 
 | ||||
| 	lock_mode = xfs_ilock_map_shared(dp); | ||||
| 	lock_mode = xfs_ilock_data_map_shared(dp); | ||||
| 	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name); | ||||
| 	xfs_iunlock_map_shared(dp, lock_mode); | ||||
| 	xfs_iunlock(dp, lock_mode); | ||||
| 
 | ||||
| 	if (error) | ||||
| 		goto out; | ||||
| @ -2141,8 +2137,8 @@ xfs_ifree_cluster( | ||||
| { | ||||
| 	xfs_mount_t		*mp = free_ip->i_mount; | ||||
| 	int			blks_per_cluster; | ||||
| 	int			inodes_per_cluster; | ||||
| 	int			nbufs; | ||||
| 	int			ninodes; | ||||
| 	int			i, j; | ||||
| 	xfs_daddr_t		blkno; | ||||
| 	xfs_buf_t		*bp; | ||||
| @ -2152,18 +2148,11 @@ xfs_ifree_cluster( | ||||
| 	struct xfs_perag	*pag; | ||||
| 
 | ||||
| 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum)); | ||||
| 	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) { | ||||
| 		blks_per_cluster = 1; | ||||
| 		ninodes = mp->m_sb.sb_inopblock; | ||||
| 		nbufs = XFS_IALLOC_BLOCKS(mp); | ||||
| 	} else { | ||||
| 		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) / | ||||
| 					mp->m_sb.sb_blocksize; | ||||
| 		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock; | ||||
| 		nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster; | ||||
| 	} | ||||
| 	blks_per_cluster = xfs_icluster_size_fsb(mp); | ||||
| 	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog; | ||||
| 	nbufs = mp->m_ialloc_blks / blks_per_cluster; | ||||
| 
 | ||||
| 	for (j = 0; j < nbufs; j++, inum += ninodes) { | ||||
| 	for (j = 0; j < nbufs; j++, inum += inodes_per_cluster) { | ||||
| 		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum), | ||||
| 					 XFS_INO_TO_AGBNO(mp, inum)); | ||||
| 
 | ||||
| @ -2225,7 +2214,7 @@ xfs_ifree_cluster( | ||||
| 		 * transaction stale above, which means there is no point in | ||||
| 		 * even trying to lock them. | ||||
| 		 */ | ||||
| 		for (i = 0; i < ninodes; i++) { | ||||
| 		for (i = 0; i < inodes_per_cluster; i++) { | ||||
| retry: | ||||
| 			rcu_read_lock(); | ||||
| 			ip = radix_tree_lookup(&pag->pag_ici_root, | ||||
| @ -2906,13 +2895,13 @@ xfs_iflush_cluster( | ||||
| 
 | ||||
| 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); | ||||
| 
 | ||||
| 	inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog; | ||||
| 	inodes_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog; | ||||
| 	ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *); | ||||
| 	ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS); | ||||
| 	if (!ilist) | ||||
| 		goto out_put; | ||||
| 
 | ||||
| 	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1); | ||||
| 	mask = ~(((mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog)) - 1); | ||||
| 	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask; | ||||
| 	rcu_read_lock(); | ||||
| 	/* really need a gang lookup range call here */ | ||||
|  | ||||
| @ -337,8 +337,8 @@ int		xfs_ilock_nowait(xfs_inode_t *, uint); | ||||
| void		xfs_iunlock(xfs_inode_t *, uint); | ||||
| void		xfs_ilock_demote(xfs_inode_t *, uint); | ||||
| int		xfs_isilocked(xfs_inode_t *, uint); | ||||
| uint		xfs_ilock_map_shared(xfs_inode_t *); | ||||
| void		xfs_iunlock_map_shared(xfs_inode_t *, uint); | ||||
| uint		xfs_ilock_data_map_shared(struct xfs_inode *); | ||||
| uint		xfs_ilock_attr_map_shared(struct xfs_inode *); | ||||
| int		xfs_ialloc(struct xfs_trans *, xfs_inode_t *, umode_t, | ||||
| 			   xfs_nlink_t, xfs_dev_t, prid_t, int, | ||||
| 			   struct xfs_buf **, xfs_inode_t **); | ||||
|  | ||||
| @ -431,6 +431,8 @@ xfs_iread_extents( | ||||
| 	xfs_ifork_t	*ifp; | ||||
| 	xfs_extnum_t	nextents; | ||||
| 
 | ||||
| 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | ||||
| 
 | ||||
| 	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { | ||||
| 		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW, | ||||
| 				 ip->i_mount); | ||||
| @ -721,15 +723,16 @@ xfs_idestroy_fork( | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * xfs_iextents_copy() | ||||
|  * Convert in-core extents to on-disk form | ||||
|  * | ||||
|  * This is called to copy the REAL extents (as opposed to the delayed | ||||
|  * allocation extents) from the inode into the given buffer.  It | ||||
|  * returns the number of bytes copied into the buffer. | ||||
|  * For either the data or attr fork in extent format, we need to endian convert | ||||
|  * the in-core extent as we place them into the on-disk inode. | ||||
|  * | ||||
|  * If there are no delayed allocation extents, then we can just | ||||
|  * memcpy() the extents into the buffer.  Otherwise, we need to | ||||
|  * examine each extent in turn and skip those which are delayed. | ||||
|  * In the case of the data fork, the in-core and on-disk fork sizes can be | ||||
|  * different due to delayed allocation extents. We only copy on-disk extents | ||||
|  * here, so callers must always use the physical fork size to determine the | ||||
|  * size of the buffer passed to this routine.  We will return the size actually | ||||
|  * used. | ||||
|  */ | ||||
| int | ||||
| xfs_iextents_copy( | ||||
|  | ||||
| @ -30,6 +30,7 @@ | ||||
| #include "xfs_trace.h" | ||||
| #include "xfs_trans_priv.h" | ||||
| #include "xfs_dinode.h" | ||||
| #include "xfs_log.h" | ||||
| 
 | ||||
| 
 | ||||
| kmem_zone_t	*xfs_ili_zone;		/* inode log item zone */ | ||||
| @ -39,6 +40,85 @@ static inline struct xfs_inode_log_item *INODE_ITEM(struct xfs_log_item *lip) | ||||
| 	return container_of(lip, struct xfs_inode_log_item, ili_item); | ||||
| } | ||||
| 
 | ||||
| STATIC void | ||||
| xfs_inode_item_data_fork_size( | ||||
| 	struct xfs_inode_log_item *iip, | ||||
| 	int			*nvecs, | ||||
| 	int			*nbytes) | ||||
| { | ||||
| 	struct xfs_inode	*ip = iip->ili_inode; | ||||
| 
 | ||||
| 	switch (ip->i_d.di_format) { | ||||
| 	case XFS_DINODE_FMT_EXTENTS: | ||||
| 		if ((iip->ili_fields & XFS_ILOG_DEXT) && | ||||
| 		    ip->i_d.di_nextents > 0 && | ||||
| 		    ip->i_df.if_bytes > 0) { | ||||
| 			/* worst case, doesn't subtract delalloc extents */ | ||||
| 			*nbytes += XFS_IFORK_DSIZE(ip); | ||||
| 			*nvecs += 1; | ||||
| 		} | ||||
| 		break; | ||||
| 	case XFS_DINODE_FMT_BTREE: | ||||
| 		if ((iip->ili_fields & XFS_ILOG_DBROOT) && | ||||
| 		    ip->i_df.if_broot_bytes > 0) { | ||||
| 			*nbytes += ip->i_df.if_broot_bytes; | ||||
| 			*nvecs += 1; | ||||
| 		} | ||||
| 		break; | ||||
| 	case XFS_DINODE_FMT_LOCAL: | ||||
| 		if ((iip->ili_fields & XFS_ILOG_DDATA) && | ||||
| 		    ip->i_df.if_bytes > 0) { | ||||
| 			*nbytes += roundup(ip->i_df.if_bytes, 4); | ||||
| 			*nvecs += 1; | ||||
| 		} | ||||
| 		break; | ||||
| 
 | ||||
| 	case XFS_DINODE_FMT_DEV: | ||||
| 	case XFS_DINODE_FMT_UUID: | ||||
| 		break; | ||||
| 	default: | ||||
| 		ASSERT(0); | ||||
| 		break; | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| STATIC void | ||||
| xfs_inode_item_attr_fork_size( | ||||
| 	struct xfs_inode_log_item *iip, | ||||
| 	int			*nvecs, | ||||
| 	int			*nbytes) | ||||
| { | ||||
| 	struct xfs_inode	*ip = iip->ili_inode; | ||||
| 
 | ||||
| 	switch (ip->i_d.di_aformat) { | ||||
| 	case XFS_DINODE_FMT_EXTENTS: | ||||
| 		if ((iip->ili_fields & XFS_ILOG_AEXT) && | ||||
| 		    ip->i_d.di_anextents > 0 && | ||||
| 		    ip->i_afp->if_bytes > 0) { | ||||
| 			/* worst case, doesn't subtract unused space */ | ||||
| 			*nbytes += XFS_IFORK_ASIZE(ip); | ||||
| 			*nvecs += 1; | ||||
| 		} | ||||
| 		break; | ||||
| 	case XFS_DINODE_FMT_BTREE: | ||||
| 		if ((iip->ili_fields & XFS_ILOG_ABROOT) && | ||||
| 		    ip->i_afp->if_broot_bytes > 0) { | ||||
| 			*nbytes += ip->i_afp->if_broot_bytes; | ||||
| 			*nvecs += 1; | ||||
| 		} | ||||
| 		break; | ||||
| 	case XFS_DINODE_FMT_LOCAL: | ||||
| 		if ((iip->ili_fields & XFS_ILOG_ADATA) && | ||||
| 		    ip->i_afp->if_bytes > 0) { | ||||
| 			*nbytes += roundup(ip->i_afp->if_bytes, 4); | ||||
| 			*nvecs += 1; | ||||
| 		} | ||||
| 		break; | ||||
| 	default: | ||||
| 		ASSERT(0); | ||||
| 		break; | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * This returns the number of iovecs needed to log the given inode item. | ||||
| @ -60,175 +140,48 @@ xfs_inode_item_size( | ||||
| 	*nbytes += sizeof(struct xfs_inode_log_format) + | ||||
| 		   xfs_icdinode_size(ip->i_d.di_version); | ||||
| 
 | ||||
| 	switch (ip->i_d.di_format) { | ||||
| 	case XFS_DINODE_FMT_EXTENTS: | ||||
| 		if ((iip->ili_fields & XFS_ILOG_DEXT) && | ||||
| 		    ip->i_d.di_nextents > 0 && | ||||
| 		    ip->i_df.if_bytes > 0) { | ||||
| 			/* worst case, doesn't subtract delalloc extents */ | ||||
| 			*nbytes += XFS_IFORK_DSIZE(ip); | ||||
| 			*nvecs += 1; | ||||
| 		} | ||||
| 		break; | ||||
| 
 | ||||
| 	case XFS_DINODE_FMT_BTREE: | ||||
| 		if ((iip->ili_fields & XFS_ILOG_DBROOT) && | ||||
| 		    ip->i_df.if_broot_bytes > 0) { | ||||
| 			*nbytes += ip->i_df.if_broot_bytes; | ||||
| 			*nvecs += 1; | ||||
| 		} | ||||
| 		break; | ||||
| 
 | ||||
| 	case XFS_DINODE_FMT_LOCAL: | ||||
| 		if ((iip->ili_fields & XFS_ILOG_DDATA) && | ||||
| 		    ip->i_df.if_bytes > 0) { | ||||
| 			*nbytes += roundup(ip->i_df.if_bytes, 4); | ||||
| 			*nvecs += 1; | ||||
| 		} | ||||
| 		break; | ||||
| 
 | ||||
| 	case XFS_DINODE_FMT_DEV: | ||||
| 	case XFS_DINODE_FMT_UUID: | ||||
| 		break; | ||||
| 
 | ||||
| 	default: | ||||
| 		ASSERT(0); | ||||
| 		break; | ||||
| 	} | ||||
| 
 | ||||
| 	if (!XFS_IFORK_Q(ip)) | ||||
| 		return; | ||||
| 
 | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Log any necessary attribute data. | ||||
| 	 */ | ||||
| 	switch (ip->i_d.di_aformat) { | ||||
| 	case XFS_DINODE_FMT_EXTENTS: | ||||
| 		if ((iip->ili_fields & XFS_ILOG_AEXT) && | ||||
| 		    ip->i_d.di_anextents > 0 && | ||||
| 		    ip->i_afp->if_bytes > 0) { | ||||
| 			/* worst case, doesn't subtract unused space */ | ||||
| 			*nbytes += XFS_IFORK_ASIZE(ip); | ||||
| 			*nvecs += 1; | ||||
| 		} | ||||
| 		break; | ||||
| 
 | ||||
| 	case XFS_DINODE_FMT_BTREE: | ||||
| 		if ((iip->ili_fields & XFS_ILOG_ABROOT) && | ||||
| 		    ip->i_afp->if_broot_bytes > 0) { | ||||
| 			*nbytes += ip->i_afp->if_broot_bytes; | ||||
| 			*nvecs += 1; | ||||
| 		} | ||||
| 		break; | ||||
| 
 | ||||
| 	case XFS_DINODE_FMT_LOCAL: | ||||
| 		if ((iip->ili_fields & XFS_ILOG_ADATA) && | ||||
| 		    ip->i_afp->if_bytes > 0) { | ||||
| 			*nbytes += roundup(ip->i_afp->if_bytes, 4); | ||||
| 			*nvecs += 1; | ||||
| 		} | ||||
| 		break; | ||||
| 
 | ||||
| 	default: | ||||
| 		ASSERT(0); | ||||
| 		break; | ||||
| 	} | ||||
| 	xfs_inode_item_data_fork_size(iip, nvecs, nbytes); | ||||
| 	if (XFS_IFORK_Q(ip)) | ||||
| 		xfs_inode_item_attr_fork_size(iip, nvecs, nbytes); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * xfs_inode_item_format_extents - convert in-core extents to on-disk form | ||||
|  * | ||||
|  * For either the data or attr fork in extent format, we need to endian convert | ||||
|  * the in-core extent as we place them into the on-disk inode. In this case, we | ||||
|  * need to do this conversion before we write the extents into the log. Because | ||||
|  * we don't have the disk inode to write into here, we allocate a buffer and | ||||
|  * format the extents into it via xfs_iextents_copy(). We free the buffer in | ||||
|  * the unlock routine after the copy for the log has been made. | ||||
|  * | ||||
|  * In the case of the data fork, the in-core and on-disk fork sizes can be | ||||
|  * different due to delayed allocation extents. We only log on-disk extents | ||||
|  * here, so always use the physical fork size to determine the size of the | ||||
|  * buffer we need to allocate. | ||||
|  * If this is a v1 format inode, then we need to log it as such.  This means | ||||
|  * that we have to copy the link count from the new field to the old.  We | ||||
|  * don't have to worry about the new fields, because nothing trusts them as | ||||
|  * long as the old inode version number is there. | ||||
|  */ | ||||
| STATIC void | ||||
| xfs_inode_item_format_extents( | ||||
| 	struct xfs_inode	*ip, | ||||
| 	struct xfs_log_iovec	*vecp, | ||||
| 	int			whichfork, | ||||
| 	int			type) | ||||
| xfs_inode_item_format_v1_inode( | ||||
| 	struct xfs_inode	*ip) | ||||
| { | ||||
| 	xfs_bmbt_rec_t		*ext_buffer; | ||||
| 
 | ||||
| 	ext_buffer = kmem_alloc(XFS_IFORK_SIZE(ip, whichfork), KM_SLEEP); | ||||
| 	if (whichfork == XFS_DATA_FORK) | ||||
| 		ip->i_itemp->ili_extents_buf = ext_buffer; | ||||
| 	else | ||||
| 		ip->i_itemp->ili_aextents_buf = ext_buffer; | ||||
| 
 | ||||
| 	vecp->i_addr = ext_buffer; | ||||
| 	vecp->i_len = xfs_iextents_copy(ip, ext_buffer, whichfork); | ||||
| 	vecp->i_type = type; | ||||
| 	if (!xfs_sb_version_hasnlink(&ip->i_mount->m_sb)) { | ||||
| 		/*
 | ||||
| 		 * Convert it back. | ||||
| 		 */ | ||||
| 		ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1); | ||||
| 		ip->i_d.di_onlink = ip->i_d.di_nlink; | ||||
| 	} else { | ||||
| 		/*
 | ||||
| 		 * The superblock version has already been bumped, | ||||
| 		 * so just make the conversion to the new inode | ||||
| 		 * format permanent. | ||||
| 		 */ | ||||
| 		ip->i_d.di_version = 2; | ||||
| 		ip->i_d.di_onlink = 0; | ||||
| 		memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * This is called to fill in the vector of log iovecs for the | ||||
|  * given inode log item.  It fills the first item with an inode | ||||
|  * log format structure, the second with the on-disk inode structure, | ||||
|  * and a possible third and/or fourth with the inode data/extents/b-tree | ||||
|  * root and inode attributes data/extents/b-tree root. | ||||
|  */ | ||||
| STATIC void | ||||
| xfs_inode_item_format( | ||||
| 	struct xfs_log_item	*lip, | ||||
| 	struct xfs_log_iovec	*vecp) | ||||
| xfs_inode_item_format_data_fork( | ||||
| 	struct xfs_inode_log_item *iip, | ||||
| 	struct xfs_inode_log_format *ilf, | ||||
| 	struct xfs_log_vec	*lv, | ||||
| 	struct xfs_log_iovec	**vecp) | ||||
| { | ||||
| 	struct xfs_inode_log_item *iip = INODE_ITEM(lip); | ||||
| 	struct xfs_inode	*ip = iip->ili_inode; | ||||
| 	uint			nvecs; | ||||
| 	size_t			data_bytes; | ||||
| 	xfs_mount_t		*mp; | ||||
| 
 | ||||
| 	vecp->i_addr = &iip->ili_format; | ||||
| 	vecp->i_len  = sizeof(xfs_inode_log_format_t); | ||||
| 	vecp->i_type = XLOG_REG_TYPE_IFORMAT; | ||||
| 	vecp++; | ||||
| 	nvecs	     = 1; | ||||
| 
 | ||||
| 	vecp->i_addr = &ip->i_d; | ||||
| 	vecp->i_len  = xfs_icdinode_size(ip->i_d.di_version); | ||||
| 	vecp->i_type = XLOG_REG_TYPE_ICORE; | ||||
| 	vecp++; | ||||
| 	nvecs++; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * If this is really an old format inode, then we need to | ||||
| 	 * log it as such.  This means that we have to copy the link | ||||
| 	 * count from the new field to the old.  We don't have to worry | ||||
| 	 * about the new fields, because nothing trusts them as long as | ||||
| 	 * the old inode version number is there.  If the superblock already | ||||
| 	 * has a new version number, then we don't bother converting back. | ||||
| 	 */ | ||||
| 	mp = ip->i_mount; | ||||
| 	ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb)); | ||||
| 	if (ip->i_d.di_version == 1) { | ||||
| 		if (!xfs_sb_version_hasnlink(&mp->m_sb)) { | ||||
| 			/*
 | ||||
| 			 * Convert it back. | ||||
| 			 */ | ||||
| 			ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1); | ||||
| 			ip->i_d.di_onlink = ip->i_d.di_nlink; | ||||
| 		} else { | ||||
| 			/*
 | ||||
| 			 * The superblock version has already been bumped, | ||||
| 			 * so just make the conversion to the new inode | ||||
| 			 * format permanent. | ||||
| 			 */ | ||||
| 			ip->i_d.di_version = 2; | ||||
| 			ip->i_d.di_onlink = 0; | ||||
| 			memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	switch (ip->i_d.di_format) { | ||||
| 	case XFS_DINODE_FMT_EXTENTS: | ||||
| @ -239,36 +192,23 @@ xfs_inode_item_format( | ||||
| 		if ((iip->ili_fields & XFS_ILOG_DEXT) && | ||||
| 		    ip->i_d.di_nextents > 0 && | ||||
| 		    ip->i_df.if_bytes > 0) { | ||||
| 			struct xfs_bmbt_rec *p; | ||||
| 
 | ||||
| 			ASSERT(ip->i_df.if_u1.if_extents != NULL); | ||||
| 			ASSERT(ip->i_df.if_bytes / sizeof(xfs_bmbt_rec_t) > 0); | ||||
| 			ASSERT(iip->ili_extents_buf == NULL); | ||||
| 
 | ||||
| #ifdef XFS_NATIVE_HOST | ||||
|                        if (ip->i_d.di_nextents == ip->i_df.if_bytes / | ||||
|                                                (uint)sizeof(xfs_bmbt_rec_t)) { | ||||
| 				/*
 | ||||
| 				 * There are no delayed allocation | ||||
| 				 * extents, so just point to the | ||||
| 				 * real extents array. | ||||
| 				 */ | ||||
| 				vecp->i_addr = ip->i_df.if_u1.if_extents; | ||||
| 				vecp->i_len = ip->i_df.if_bytes; | ||||
| 				vecp->i_type = XLOG_REG_TYPE_IEXT; | ||||
| 			} else | ||||
| #endif | ||||
| 			{ | ||||
| 				xfs_inode_item_format_extents(ip, vecp, | ||||
| 					XFS_DATA_FORK, XLOG_REG_TYPE_IEXT); | ||||
| 			} | ||||
| 			ASSERT(vecp->i_len <= ip->i_df.if_bytes); | ||||
| 			iip->ili_format.ilf_dsize = vecp->i_len; | ||||
| 			vecp++; | ||||
| 			nvecs++; | ||||
| 			p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IEXT); | ||||
| 			data_bytes = xfs_iextents_copy(ip, p, XFS_DATA_FORK); | ||||
| 			xlog_finish_iovec(lv, *vecp, data_bytes); | ||||
| 
 | ||||
| 			ASSERT(data_bytes <= ip->i_df.if_bytes); | ||||
| 
 | ||||
| 			ilf->ilf_dsize = data_bytes; | ||||
| 			ilf->ilf_size++; | ||||
| 		} else { | ||||
| 			iip->ili_fields &= ~XFS_ILOG_DEXT; | ||||
| 		} | ||||
| 		break; | ||||
| 
 | ||||
| 	case XFS_DINODE_FMT_BTREE: | ||||
| 		iip->ili_fields &= | ||||
| 			~(XFS_ILOG_DDATA | XFS_ILOG_DEXT | | ||||
| @ -277,80 +217,70 @@ xfs_inode_item_format( | ||||
| 		if ((iip->ili_fields & XFS_ILOG_DBROOT) && | ||||
| 		    ip->i_df.if_broot_bytes > 0) { | ||||
| 			ASSERT(ip->i_df.if_broot != NULL); | ||||
| 			vecp->i_addr = ip->i_df.if_broot; | ||||
| 			vecp->i_len = ip->i_df.if_broot_bytes; | ||||
| 			vecp->i_type = XLOG_REG_TYPE_IBROOT; | ||||
| 			vecp++; | ||||
| 			nvecs++; | ||||
| 			iip->ili_format.ilf_dsize = ip->i_df.if_broot_bytes; | ||||
| 			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IBROOT, | ||||
| 					ip->i_df.if_broot, | ||||
| 					ip->i_df.if_broot_bytes); | ||||
| 			ilf->ilf_dsize = ip->i_df.if_broot_bytes; | ||||
| 			ilf->ilf_size++; | ||||
| 		} else { | ||||
| 			ASSERT(!(iip->ili_fields & | ||||
| 				 XFS_ILOG_DBROOT)); | ||||
| 			iip->ili_fields &= ~XFS_ILOG_DBROOT; | ||||
| 		} | ||||
| 		break; | ||||
| 
 | ||||
| 	case XFS_DINODE_FMT_LOCAL: | ||||
| 		iip->ili_fields &= | ||||
| 			~(XFS_ILOG_DEXT | XFS_ILOG_DBROOT | | ||||
| 			  XFS_ILOG_DEV | XFS_ILOG_UUID); | ||||
| 		if ((iip->ili_fields & XFS_ILOG_DDATA) && | ||||
| 		    ip->i_df.if_bytes > 0) { | ||||
| 			ASSERT(ip->i_df.if_u1.if_data != NULL); | ||||
| 			ASSERT(ip->i_d.di_size > 0); | ||||
| 
 | ||||
| 			vecp->i_addr = ip->i_df.if_u1.if_data; | ||||
| 			/*
 | ||||
| 			 * Round i_bytes up to a word boundary. | ||||
| 			 * The underlying memory is guaranteed to | ||||
| 			 * to be there by xfs_idata_realloc(). | ||||
| 			 */ | ||||
| 			data_bytes = roundup(ip->i_df.if_bytes, 4); | ||||
| 			ASSERT((ip->i_df.if_real_bytes == 0) || | ||||
| 			       (ip->i_df.if_real_bytes == data_bytes)); | ||||
| 			vecp->i_len = (int)data_bytes; | ||||
| 			vecp->i_type = XLOG_REG_TYPE_ILOCAL; | ||||
| 			vecp++; | ||||
| 			nvecs++; | ||||
| 			iip->ili_format.ilf_dsize = (unsigned)data_bytes; | ||||
| 			ASSERT(ip->i_df.if_real_bytes == 0 || | ||||
| 			       ip->i_df.if_real_bytes == data_bytes); | ||||
| 			ASSERT(ip->i_df.if_u1.if_data != NULL); | ||||
| 			ASSERT(ip->i_d.di_size > 0); | ||||
| 			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_ILOCAL, | ||||
| 					ip->i_df.if_u1.if_data, data_bytes); | ||||
| 			ilf->ilf_dsize = (unsigned)data_bytes; | ||||
| 			ilf->ilf_size++; | ||||
| 		} else { | ||||
| 			iip->ili_fields &= ~XFS_ILOG_DDATA; | ||||
| 		} | ||||
| 		break; | ||||
| 
 | ||||
| 	case XFS_DINODE_FMT_DEV: | ||||
| 		iip->ili_fields &= | ||||
| 			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | | ||||
| 			  XFS_ILOG_DEXT | XFS_ILOG_UUID); | ||||
| 		if (iip->ili_fields & XFS_ILOG_DEV) { | ||||
| 			iip->ili_format.ilf_u.ilfu_rdev = | ||||
| 				ip->i_df.if_u2.if_rdev; | ||||
| 		} | ||||
| 		if (iip->ili_fields & XFS_ILOG_DEV) | ||||
| 			ilf->ilf_u.ilfu_rdev = ip->i_df.if_u2.if_rdev; | ||||
| 		break; | ||||
| 
 | ||||
| 	case XFS_DINODE_FMT_UUID: | ||||
| 		iip->ili_fields &= | ||||
| 			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | | ||||
| 			  XFS_ILOG_DEXT | XFS_ILOG_DEV); | ||||
| 		if (iip->ili_fields & XFS_ILOG_UUID) { | ||||
| 			iip->ili_format.ilf_u.ilfu_uuid = | ||||
| 				ip->i_df.if_u2.if_uuid; | ||||
| 		} | ||||
| 		if (iip->ili_fields & XFS_ILOG_UUID) | ||||
| 			ilf->ilf_u.ilfu_uuid = ip->i_df.if_u2.if_uuid; | ||||
| 		break; | ||||
| 
 | ||||
| 	default: | ||||
| 		ASSERT(0); | ||||
| 		break; | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * If there are no attributes associated with the file, then we're done. | ||||
| 	 */ | ||||
| 	if (!XFS_IFORK_Q(ip)) { | ||||
| 		iip->ili_fields &= | ||||
| 			~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT); | ||||
| 		goto out; | ||||
| 	} | ||||
| STATIC void | ||||
| xfs_inode_item_format_attr_fork( | ||||
| 	struct xfs_inode_log_item *iip, | ||||
| 	struct xfs_inode_log_format *ilf, | ||||
| 	struct xfs_log_vec	*lv, | ||||
| 	struct xfs_log_iovec	**vecp) | ||||
| { | ||||
| 	struct xfs_inode	*ip = iip->ili_inode; | ||||
| 	size_t			data_bytes; | ||||
| 
 | ||||
| 	switch (ip->i_d.di_aformat) { | ||||
| 	case XFS_DINODE_FMT_EXTENTS: | ||||
| @ -360,30 +290,22 @@ xfs_inode_item_format( | ||||
| 		if ((iip->ili_fields & XFS_ILOG_AEXT) && | ||||
| 		    ip->i_d.di_anextents > 0 && | ||||
| 		    ip->i_afp->if_bytes > 0) { | ||||
| 			struct xfs_bmbt_rec *p; | ||||
| 
 | ||||
| 			ASSERT(ip->i_afp->if_bytes / sizeof(xfs_bmbt_rec_t) == | ||||
| 				ip->i_d.di_anextents); | ||||
| 			ASSERT(ip->i_afp->if_u1.if_extents != NULL); | ||||
| #ifdef XFS_NATIVE_HOST | ||||
| 			/*
 | ||||
| 			 * There are not delayed allocation extents | ||||
| 			 * for attributes, so just point at the array. | ||||
| 			 */ | ||||
| 			vecp->i_addr = ip->i_afp->if_u1.if_extents; | ||||
| 			vecp->i_len = ip->i_afp->if_bytes; | ||||
| 			vecp->i_type = XLOG_REG_TYPE_IATTR_EXT; | ||||
| #else | ||||
| 			ASSERT(iip->ili_aextents_buf == NULL); | ||||
| 			xfs_inode_item_format_extents(ip, vecp, | ||||
| 					XFS_ATTR_FORK, XLOG_REG_TYPE_IATTR_EXT); | ||||
| #endif | ||||
| 			iip->ili_format.ilf_asize = vecp->i_len; | ||||
| 			vecp++; | ||||
| 			nvecs++; | ||||
| 
 | ||||
| 			p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_EXT); | ||||
| 			data_bytes = xfs_iextents_copy(ip, p, XFS_ATTR_FORK); | ||||
| 			xlog_finish_iovec(lv, *vecp, data_bytes); | ||||
| 
 | ||||
| 			ilf->ilf_asize = data_bytes; | ||||
| 			ilf->ilf_size++; | ||||
| 		} else { | ||||
| 			iip->ili_fields &= ~XFS_ILOG_AEXT; | ||||
| 		} | ||||
| 		break; | ||||
| 
 | ||||
| 	case XFS_DINODE_FMT_BTREE: | ||||
| 		iip->ili_fields &= | ||||
| 			~(XFS_ILOG_ADATA | XFS_ILOG_AEXT); | ||||
| @ -392,61 +314,89 @@ xfs_inode_item_format( | ||||
| 		    ip->i_afp->if_broot_bytes > 0) { | ||||
| 			ASSERT(ip->i_afp->if_broot != NULL); | ||||
| 
 | ||||
| 			vecp->i_addr = ip->i_afp->if_broot; | ||||
| 			vecp->i_len = ip->i_afp->if_broot_bytes; | ||||
| 			vecp->i_type = XLOG_REG_TYPE_IATTR_BROOT; | ||||
| 			vecp++; | ||||
| 			nvecs++; | ||||
| 			iip->ili_format.ilf_asize = ip->i_afp->if_broot_bytes; | ||||
| 			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_BROOT, | ||||
| 					ip->i_afp->if_broot, | ||||
| 					ip->i_afp->if_broot_bytes); | ||||
| 			ilf->ilf_asize = ip->i_afp->if_broot_bytes; | ||||
| 			ilf->ilf_size++; | ||||
| 		} else { | ||||
| 			iip->ili_fields &= ~XFS_ILOG_ABROOT; | ||||
| 		} | ||||
| 		break; | ||||
| 
 | ||||
| 	case XFS_DINODE_FMT_LOCAL: | ||||
| 		iip->ili_fields &= | ||||
| 			~(XFS_ILOG_AEXT | XFS_ILOG_ABROOT); | ||||
| 
 | ||||
| 		if ((iip->ili_fields & XFS_ILOG_ADATA) && | ||||
| 		    ip->i_afp->if_bytes > 0) { | ||||
| 			ASSERT(ip->i_afp->if_u1.if_data != NULL); | ||||
| 
 | ||||
| 			vecp->i_addr = ip->i_afp->if_u1.if_data; | ||||
| 			/*
 | ||||
| 			 * Round i_bytes up to a word boundary. | ||||
| 			 * The underlying memory is guaranteed to | ||||
| 			 * to be there by xfs_idata_realloc(). | ||||
| 			 */ | ||||
| 			data_bytes = roundup(ip->i_afp->if_bytes, 4); | ||||
| 			ASSERT((ip->i_afp->if_real_bytes == 0) || | ||||
| 			       (ip->i_afp->if_real_bytes == data_bytes)); | ||||
| 			vecp->i_len = (int)data_bytes; | ||||
| 			vecp->i_type = XLOG_REG_TYPE_IATTR_LOCAL; | ||||
| 			vecp++; | ||||
| 			nvecs++; | ||||
| 			iip->ili_format.ilf_asize = (unsigned)data_bytes; | ||||
| 			ASSERT(ip->i_afp->if_real_bytes == 0 || | ||||
| 			       ip->i_afp->if_real_bytes == data_bytes); | ||||
| 			ASSERT(ip->i_afp->if_u1.if_data != NULL); | ||||
| 			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_LOCAL, | ||||
| 					ip->i_afp->if_u1.if_data, | ||||
| 					data_bytes); | ||||
| 			ilf->ilf_asize = (unsigned)data_bytes; | ||||
| 			ilf->ilf_size++; | ||||
| 		} else { | ||||
| 			iip->ili_fields &= ~XFS_ILOG_ADATA; | ||||
| 		} | ||||
| 		break; | ||||
| 
 | ||||
| 	default: | ||||
| 		ASSERT(0); | ||||
| 		break; | ||||
| 	} | ||||
| 
 | ||||
| out: | ||||
| 	/*
 | ||||
| 	 * Now update the log format that goes out to disk from the in-core | ||||
| 	 * values.  We always write the inode core to make the arithmetic | ||||
| 	 * games in recovery easier, which isn't a big deal as just about any | ||||
| 	 * transaction would dirty it anyway. | ||||
| 	 */ | ||||
| 	iip->ili_format.ilf_fields = XFS_ILOG_CORE | | ||||
| 		(iip->ili_fields & ~XFS_ILOG_TIMESTAMP); | ||||
| 	iip->ili_format.ilf_size = nvecs; | ||||
| } | ||||
| 
 | ||||
/*
 * This is called to fill in the vector of log iovecs for the given inode
 * log item.  It fills the first item with an inode log format structure,
 * the second with the on-disk inode structure, and a possible third and/or
 * fourth with the inode data/extents/b-tree root and inode attributes
 * data/extents/b-tree root.
 */
STATIC void
xfs_inode_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	struct xfs_inode_log_format *ilf;
	struct xfs_log_iovec	*vecp = NULL;

	ilf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_IFORMAT);
	ilf->ilf_type = XFS_LI_INODE;
	ilf->ilf_ino = ip->i_ino;
	ilf->ilf_blkno = ip->i_imap.im_blkno;
	ilf->ilf_len = ip->i_imap.im_len;
	ilf->ilf_boffset = ip->i_imap.im_boffset;
	ilf->ilf_fields = XFS_ILOG_CORE;
	ilf->ilf_size = 2; /* format + core */
	xlog_finish_iovec(lv, vecp, sizeof(struct xfs_inode_log_format));

	if (ip->i_d.di_version == 1)
		xfs_inode_item_format_v1_inode(ip);
	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ICORE,
			&ip->i_d,
			xfs_icdinode_size(ip->i_d.di_version));

	xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp);
	if (XFS_IFORK_Q(ip)) {
		xfs_inode_item_format_attr_fork(iip, ilf, lv, &vecp);
	} else {
		iip->ili_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT);
	}

	/* update the format with the exact fields we actually logged */
	ilf->ilf_fields |= (iip->ili_fields & ~XFS_ILOG_TIMESTAMP);
}
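
Taken together, the routine above emits up to four regions for a dirty extent-format inode; the recap below is inferred from the code in this hunk and is not text from the patch itself.

/*
 *	region 0: XLOG_REG_TYPE_IFORMAT    - struct xfs_inode_log_format
 *	region 1: XLOG_REG_TYPE_ICORE      - inode core, xfs_icdinode_size() bytes
 *	region 2: XLOG_REG_TYPE_IEXT       - data fork extents, if XFS_ILOG_DEXT is set
 *	region 3: XLOG_REG_TYPE_IATTR_EXT  - attr fork extents, if XFS_ILOG_AEXT is set
 */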
| 
 | ||||
| /*
 | ||||
|  * This is called to pin the inode associated with the inode log | ||||
| @ -563,27 +513,6 @@ xfs_inode_item_unlock( | ||||
| 	ASSERT(ip->i_itemp != NULL); | ||||
| 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * If the inode needed a separate buffer with which to log | ||||
| 	 * its extents, then free it now. | ||||
| 	 */ | ||||
| 	if (iip->ili_extents_buf != NULL) { | ||||
| 		ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS); | ||||
| 		ASSERT(ip->i_d.di_nextents > 0); | ||||
| 		ASSERT(iip->ili_fields & XFS_ILOG_DEXT); | ||||
| 		ASSERT(ip->i_df.if_bytes > 0); | ||||
| 		kmem_free(iip->ili_extents_buf); | ||||
| 		iip->ili_extents_buf = NULL; | ||||
| 	} | ||||
| 	if (iip->ili_aextents_buf != NULL) { | ||||
| 		ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS); | ||||
| 		ASSERT(ip->i_d.di_anextents > 0); | ||||
| 		ASSERT(iip->ili_fields & XFS_ILOG_AEXT); | ||||
| 		ASSERT(ip->i_afp->if_bytes > 0); | ||||
| 		kmem_free(iip->ili_aextents_buf); | ||||
| 		iip->ili_aextents_buf = NULL; | ||||
| 	} | ||||
| 
 | ||||
| 	lock_flags = iip->ili_lock_flags; | ||||
| 	iip->ili_lock_flags = 0; | ||||
| 	if (lock_flags) | ||||
| @ -670,11 +599,6 @@ xfs_inode_item_init( | ||||
| 	iip->ili_inode = ip; | ||||
| 	xfs_log_item_init(mp, &iip->ili_item, XFS_LI_INODE, | ||||
| 						&xfs_inode_item_ops); | ||||
| 	iip->ili_format.ilf_type = XFS_LI_INODE; | ||||
| 	iip->ili_format.ilf_ino = ip->i_ino; | ||||
| 	iip->ili_format.ilf_blkno = ip->i_imap.im_blkno; | ||||
| 	iip->ili_format.ilf_len = ip->i_imap.im_len; | ||||
| 	iip->ili_format.ilf_boffset = ip->i_imap.im_boffset; | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  | ||||
| @ -34,11 +34,6 @@ typedef struct xfs_inode_log_item { | ||||
| 	unsigned short		ili_logged;	   /* flushed logged data */ | ||||
| 	unsigned int		ili_last_fields;   /* fields when flushed */ | ||||
| 	unsigned int		ili_fields;	   /* fields to be logged */ | ||||
| 	struct xfs_bmbt_rec	*ili_extents_buf;  /* array of logged
 | ||||
| 						      data exts */ | ||||
| 	struct xfs_bmbt_rec	*ili_aextents_buf; /* array of logged
 | ||||
| 						      attr exts */ | ||||
| 	xfs_inode_log_format_t	ili_format;	   /* logged structure */ | ||||
| } xfs_inode_log_item_t; | ||||
| 
 | ||||
| static inline int xfs_inode_clean(xfs_inode_t *ip) | ||||
|  | ||||
| @ -112,15 +112,11 @@ xfs_find_handle( | ||||
| 		memset(&handle.ha_fid, 0, sizeof(handle.ha_fid)); | ||||
| 		hsize = sizeof(xfs_fsid_t); | ||||
| 	} else { | ||||
| 		int		lock_mode; | ||||
| 
 | ||||
| 		lock_mode = xfs_ilock_map_shared(ip); | ||||
| 		handle.ha_fid.fid_len = sizeof(xfs_fid_t) - | ||||
| 					sizeof(handle.ha_fid.fid_len); | ||||
| 		handle.ha_fid.fid_pad = 0; | ||||
| 		handle.ha_fid.fid_gen = ip->i_d.di_gen; | ||||
| 		handle.ha_fid.fid_ino = ip->i_ino; | ||||
| 		xfs_iunlock_map_shared(ip, lock_mode); | ||||
| 
 | ||||
| 		hsize = XFS_HSIZE(handle); | ||||
| 	} | ||||
|  | ||||
| @ -459,14 +459,12 @@ xfs_vn_getattr( | ||||
| 
 | ||||
| static void | ||||
| xfs_setattr_mode( | ||||
| 	struct xfs_trans	*tp, | ||||
| 	struct xfs_inode	*ip, | ||||
| 	struct iattr		*iattr) | ||||
| { | ||||
| 	struct inode	*inode = VFS_I(ip); | ||||
| 	umode_t		mode = iattr->ia_mode; | ||||
| 	struct inode		*inode = VFS_I(ip); | ||||
| 	umode_t			mode = iattr->ia_mode; | ||||
| 
 | ||||
| 	ASSERT(tp); | ||||
| 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | ||||
| 
 | ||||
| 	ip->i_d.di_mode &= S_IFMT; | ||||
| @ -476,6 +474,32 @@ xfs_setattr_mode( | ||||
| 	inode->i_mode |= mode & ~S_IFMT; | ||||
| } | ||||
| 
 | ||||
| static void | ||||
| xfs_setattr_time( | ||||
| 	struct xfs_inode	*ip, | ||||
| 	struct iattr		*iattr) | ||||
| { | ||||
| 	struct inode		*inode = VFS_I(ip); | ||||
| 
 | ||||
| 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | ||||
| 
 | ||||
| 	if (iattr->ia_valid & ATTR_ATIME) { | ||||
| 		inode->i_atime = iattr->ia_atime; | ||||
| 		ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec; | ||||
| 		ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec; | ||||
| 	} | ||||
| 	if (iattr->ia_valid & ATTR_CTIME) { | ||||
| 		inode->i_ctime = iattr->ia_ctime; | ||||
| 		ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; | ||||
| 		ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; | ||||
| 	} | ||||
| 	if (iattr->ia_valid & ATTR_MTIME) { | ||||
| 		inode->i_mtime = iattr->ia_mtime; | ||||
| 		ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; | ||||
| 		ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| int | ||||
| xfs_setattr_nonsize( | ||||
| 	struct xfs_inode	*ip, | ||||
| @ -630,30 +654,10 @@ xfs_setattr_nonsize( | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Change file access modes. | ||||
| 	 */ | ||||
| 	if (mask & ATTR_MODE) | ||||
| 		xfs_setattr_mode(tp, ip, iattr); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Change file access or modified times. | ||||
| 	 */ | ||||
| 	if (mask & ATTR_ATIME) { | ||||
| 		inode->i_atime = iattr->ia_atime; | ||||
| 		ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec; | ||||
| 		ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec; | ||||
| 	} | ||||
| 	if (mask & ATTR_CTIME) { | ||||
| 		inode->i_ctime = iattr->ia_ctime; | ||||
| 		ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; | ||||
| 		ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; | ||||
| 	} | ||||
| 	if (mask & ATTR_MTIME) { | ||||
| 		inode->i_mtime = iattr->ia_mtime; | ||||
| 		ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; | ||||
| 		ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; | ||||
| 	} | ||||
| 		xfs_setattr_mode(ip, iattr); | ||||
| 	if (mask & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME)) | ||||
| 		xfs_setattr_time(ip, iattr); | ||||
| 
 | ||||
| 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | ||||
| 
 | ||||
| @ -868,22 +872,10 @@ xfs_setattr_size( | ||||
| 		xfs_inode_clear_eofblocks_tag(ip); | ||||
| 	} | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Change file access modes. | ||||
| 	 */ | ||||
| 	if (mask & ATTR_MODE) | ||||
| 		xfs_setattr_mode(tp, ip, iattr); | ||||
| 
 | ||||
| 	if (mask & ATTR_CTIME) { | ||||
| 		inode->i_ctime = iattr->ia_ctime; | ||||
| 		ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; | ||||
| 		ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; | ||||
| 	} | ||||
| 	if (mask & ATTR_MTIME) { | ||||
| 		inode->i_mtime = iattr->ia_mtime; | ||||
| 		ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; | ||||
| 		ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; | ||||
| 	} | ||||
| 		xfs_setattr_mode(ip, iattr); | ||||
| 	if (mask & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME)) | ||||
| 		xfs_setattr_time(ip, iattr); | ||||
| 
 | ||||
| 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | ||||
| 
 | ||||
|  | ||||
@@ -209,9 +209,8 @@ xfs_bulkstat(
	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
	xfs_inobt_rec_incore_t	*irbufend; /* end of good irec buffer entries */
	xfs_ino_t		lastino; /* last inode number returned */
	int			nbcluster; /* # of blocks in a cluster */
	int			nicluster; /* # of inodes in a cluster */
	int			nimask;	/* mask for inode clusters */
	int			blks_per_cluster; /* # of blocks per cluster */
	int			inodes_per_cluster;/* # of inodes per cluster */
	int			nirbuf;	/* size of irbuf */
	int			rval;	/* return value error code */
	int			tmp;	/* result value from btree calls */
@@ -243,11 +242,8 @@ xfs_bulkstat(
	*done = 0;
	fmterror = 0;
	ubufp = ubuffer;
	nicluster = mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp) ?
		mp->m_sb.sb_inopblock :
		(XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
	nimask = ~(nicluster - 1);
	nbcluster = nicluster >> mp->m_sb.sb_inopblog;
	blks_per_cluster = xfs_icluster_size_fsb(mp);
	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
	if (!irbuf)
		return ENOMEM;
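
xfs_icluster_size_fsb() is the common inode cluster helper this pull request introduces; its body is not shown in this hunk, but from the open-coded calculation it replaces it presumably reduces to something like the sketch below (the exact definition may differ).

/* sketch only: blocks per inode cluster, never less than one block */
static inline int
xfs_icluster_size_fsb(
	struct xfs_mount	*mp)
{
	if (mp->m_sb.sb_blocksize >= mp->m_inode_cluster_size)
		return 1;
	return mp->m_inode_cluster_size >> mp->m_sb.sb_blocklog;
}

With that, inodes_per_cluster above falls out as blks_per_cluster << sb_inopblog, matching the old nicluster value.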
| @ -390,12 +386,12 @@ xfs_bulkstat( | ||||
| 				agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino); | ||||
| 				for (chunkidx = 0; | ||||
| 				     chunkidx < XFS_INODES_PER_CHUNK; | ||||
| 				     chunkidx += nicluster, | ||||
| 				     agbno += nbcluster) { | ||||
| 					if (xfs_inobt_maskn(chunkidx, nicluster) | ||||
| 							& ~r.ir_free) | ||||
| 				     chunkidx += inodes_per_cluster, | ||||
| 				     agbno += blks_per_cluster) { | ||||
| 					if (xfs_inobt_maskn(chunkidx, | ||||
| 					    inodes_per_cluster) & ~r.ir_free) | ||||
| 						xfs_btree_reada_bufs(mp, agno, | ||||
| 							agbno, nbcluster, | ||||
| 							agbno, blks_per_cluster, | ||||
| 							&xfs_inode_buf_ops); | ||||
| 				} | ||||
| 				blk_finish_plug(&plug); | ||||
|  | ||||
@@ -30,6 +30,52 @@ struct xfs_log_vec {

#define XFS_LOG_VEC_ORDERED	(-1)

static inline void *
xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type)
{
	struct xfs_log_iovec *vec = *vecp;

	if (vec) {
		ASSERT(vec - lv->lv_iovecp < lv->lv_niovecs);
		vec++;
	} else {
		vec = &lv->lv_iovecp[0];
	}

	vec->i_type = type;
	vec->i_addr = lv->lv_buf + lv->lv_buf_len;

	ASSERT(IS_ALIGNED((unsigned long)vec->i_addr, sizeof(uint64_t)));

	*vecp = vec;
	return vec->i_addr;
}

static inline void
xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec, int len)
{
	/*
	 * We need to make sure the next buffer is naturally aligned for the
	 * biggest basic data type we put into it.  We already accounted for
	 * this when sizing the buffer.
	 */
	lv->lv_buf_len += round_up(len, sizeof(uint64_t));
	vec->i_len = len;
}

static inline void *
xlog_copy_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type, void *data, int len)
{
	void *buf;

	buf = xlog_prepare_iovec(lv, vecp, type);
	memcpy(buf, data, len);
	xlog_finish_iovec(lv, *vecp, len);
	return buf;
}

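To make the new helper API concrete, here is a minimal usage sketch of a log item's ->iop_format routine built on xlog_prepare_iovec(), xlog_finish_iovec() and xlog_copy_iovec(). Everything named "foo" (the item type, format structure, region types and the FOO_ITEM macro) is invented for illustration and is not part of this series.

STATIC void
xfs_foo_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_foo_log_item	*fip = FOO_ITEM(lip);
	struct xfs_foo_log_format *flf;
	struct xfs_log_iovec	*vecp = NULL;

	/* region 0: fill the log format header in place in the lv buffer */
	flf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_FOO_FORMAT);
	flf->flf_type = XFS_LI_FOO;
	flf->flf_size = 2;		/* two regions: format + data */
	xlog_finish_iovec(lv, vecp, sizeof(*flf));

	/* region 1: copy an existing in-memory buffer in a single call */
	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_FOO_DATA,
			fip->foo_data, fip->foo_data_len);
}

xlog_finish_iovec() rounds each region up to an 8 byte boundary, which matches the per-iovec slack that xlog_cil_insert_format_items() adds to its space reservation further down in this series.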
| /*
 | ||||
|  * Structure used to pass callback function and the function's argument | ||||
|  * to the log manager. | ||||
|  | ||||
| @ -82,36 +82,6 @@ xlog_cil_init_post_recovery( | ||||
| 								log->l_curr_block); | ||||
| } | ||||
| 
 | ||||
| STATIC int | ||||
| xlog_cil_lv_item_format( | ||||
| 	struct xfs_log_item	*lip, | ||||
| 	struct xfs_log_vec	*lv) | ||||
| { | ||||
| 	int	index; | ||||
| 	char	*ptr; | ||||
| 
 | ||||
| 	/* format new vectors into array */ | ||||
| 	lip->li_ops->iop_format(lip, lv->lv_iovecp); | ||||
| 
 | ||||
| 	/* copy data into existing array */ | ||||
| 	ptr = lv->lv_buf; | ||||
| 	for (index = 0; index < lv->lv_niovecs; index++) { | ||||
| 		struct xfs_log_iovec *vec = &lv->lv_iovecp[index]; | ||||
| 
 | ||||
| 		memcpy(ptr, vec->i_addr, vec->i_len); | ||||
| 		vec->i_addr = ptr; | ||||
| 		ptr += vec->i_len; | ||||
| 	} | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * some size calculations for log vectors over-estimate, so the caller | ||||
| 	 * doesn't know the amount of space actually used by the item. Return | ||||
| 	 * the byte count to the caller so they can check and store it | ||||
| 	 * appropriately. | ||||
| 	 */ | ||||
| 	return ptr - lv->lv_buf; | ||||
| } | ||||
| 
 | ||||
/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
@@ -232,6 +202,13 @@ xlog_cil_insert_format_items(
			nbytes = 0;
		}

		/*
		 * We 64-bit align the length of each iovec so that the start
		 * of the next one is naturally aligned.  We'll need to
		 * account for that slack space here.
		 */
		nbytes += niovecs * sizeof(uint64_t);

		/* grab the old item if it exists for reservation accounting */
		old_lv = lip->li_lv;

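For a rough sense of scale (numbers invented for illustration): an item whose ->iop_size callback reports three iovecs totalling 56 + 96 + 52 = 204 bytes has its reservation bumped to 204 + 3 * 8 = 228 bytes here; the later rounding in xlog_finish_iovec() can consume at most that much, so the estimate stays a safe upper bound.
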
@@ -254,34 +231,27 @@ xlog_cil_insert_format_items(
			 */
			*diff_iovecs -= lv->lv_niovecs;
			*diff_len -= lv->lv_buf_len;

			/* Ensure the lv is set up according to ->iop_size */
			lv->lv_niovecs = niovecs;
			lv->lv_buf = (char *)lv + buf_size - nbytes;

			lv->lv_buf_len = xlog_cil_lv_item_format(lip, lv);
			goto insert;
		} else {
			/* allocate new data chunk */
			lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS);
			lv->lv_item = lip;
			lv->lv_size = buf_size;
			if (ordered) {
				/* track as an ordered logvec */
				ASSERT(lip->li_lv == NULL);
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
				goto insert;
			}
			lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
		}

		/* allocate new data chunk */
		lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS);
		lv->lv_item = lip;
		lv->lv_size = buf_size;
		/* Ensure the lv is set up according to ->iop_size */
		lv->lv_niovecs = niovecs;
		if (ordered) {
			/* track as an ordered logvec */
			ASSERT(lip->li_lv == NULL);
			lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			goto insert;
		}

		/* The allocated iovec region lies beyond the log vector. */
		lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf_len = 0;
		lv->lv_buf = (char *)lv + buf_size - nbytes;

		lv->lv_buf_len = xlog_cil_lv_item_format(lip, lv);
		lip->li_ops->iop_format(lip, lv);
insert:
		ASSERT(lv->lv_buf_len <= nbytes);
		xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);

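The two comments above describe one allocation that carries the log vector header, its iovec array and the formatting buffer back to back; the diagram below is an illustration derived from the assignments in this hunk, not part of the patch.

/*
 *	buf_size bytes, allocated as a single chunk:
 *
 *	+--------------------+--------------------------+------------------+
 *	| struct xfs_log_vec | niovecs * xfs_log_iovec  | nbytes of data   |
 *	+--------------------+--------------------------+------------------+
 *	lv                    lv->lv_iovecp = &lv[1]      lv->lv_buf =
 *	                                                   (char *)lv + buf_size - nbytes
 */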
| @ -1654,6 +1654,7 @@ xlog_recover_reorder_trans( | ||||
| 	int			pass) | ||||
| { | ||||
| 	xlog_recover_item_t	*item, *n; | ||||
| 	int			error = 0; | ||||
| 	LIST_HEAD(sort_list); | ||||
| 	LIST_HEAD(cancel_list); | ||||
| 	LIST_HEAD(buffer_list); | ||||
| @ -1695,9 +1696,17 @@ xlog_recover_reorder_trans( | ||||
| 				"%s: unrecognized type of log operation", | ||||
| 				__func__); | ||||
| 			ASSERT(0); | ||||
| 			return XFS_ERROR(EIO); | ||||
| 			/*
 | ||||
| 			 * return the remaining items back to the transaction | ||||
| 			 * item list so they can be freed in caller. | ||||
| 			 */ | ||||
| 			if (!list_empty(&sort_list)) | ||||
| 				list_splice_init(&sort_list, &trans->r_itemq); | ||||
| 			error = XFS_ERROR(EIO); | ||||
| 			goto out; | ||||
| 		} | ||||
| 	} | ||||
| out: | ||||
| 	ASSERT(list_empty(&sort_list)); | ||||
| 	if (!list_empty(&buffer_list)) | ||||
| 		list_splice(&buffer_list, &trans->r_itemq); | ||||
| @ -1707,7 +1716,7 @@ xlog_recover_reorder_trans( | ||||
| 		list_splice_tail(&inode_buffer_list, &trans->r_itemq); | ||||
| 	if (!list_empty(&cancel_list)) | ||||
| 		list_splice_tail(&cancel_list, &trans->r_itemq); | ||||
| 	return 0; | ||||
| 	return error; | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
| @ -2517,19 +2526,19 @@ xlog_recover_buffer_pass2( | ||||
| 	 * | ||||
| 	 * Also make sure that only inode buffers with good sizes stay in | ||||
| 	 * the buffer cache.  The kernel moves inodes in buffers of 1 block | ||||
| 	 * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger.  The inode | ||||
| 	 * or mp->m_inode_cluster_size bytes, whichever is bigger.  The inode | ||||
| 	 * buffers in the log can be a different size if the log was generated | ||||
| 	 * by an older kernel using unclustered inode buffers or a newer kernel | ||||
| 	 * running with a different inode cluster size.  Regardless, if the | ||||
| 	 * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE) | ||||
| 	 * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep | ||||
| 	 * the inode buffer size isn't MAX(blocksize, mp->m_inode_cluster_size) | ||||
| 	 * for *our* value of mp->m_inode_cluster_size, then we need to keep | ||||
| 	 * the buffer out of the buffer cache so that the buffer won't | ||||
| 	 * overlap with future reads of those inodes. | ||||
| 	 */ | ||||
| 	if (XFS_DINODE_MAGIC == | ||||
| 	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) && | ||||
| 	    (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize, | ||||
| 			(__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) { | ||||
| 			(__uint32_t)log->l_mp->m_inode_cluster_size))) { | ||||
| 		xfs_buf_stale(bp); | ||||
| 		error = xfs_bwrite(bp); | ||||
| 	} else { | ||||
| @ -3202,10 +3211,10 @@ xlog_recover_do_icreate_pass2( | ||||
| 	} | ||||
| 
 | ||||
| 	/* existing allocation is fixed value */ | ||||
| 	ASSERT(count == XFS_IALLOC_INODES(mp)); | ||||
| 	ASSERT(length == XFS_IALLOC_BLOCKS(mp)); | ||||
| 	if (count != XFS_IALLOC_INODES(mp) || | ||||
| 	     length != XFS_IALLOC_BLOCKS(mp)) { | ||||
| 	ASSERT(count == mp->m_ialloc_inos); | ||||
| 	ASSERT(length == mp->m_ialloc_blks); | ||||
| 	if (count != mp->m_ialloc_inos || | ||||
| 	     length != mp->m_ialloc_blks) { | ||||
| 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2"); | ||||
| 		return EINVAL; | ||||
| 	} | ||||
| @ -3611,8 +3620,10 @@ xlog_recover_process_data( | ||||
| 				error = XFS_ERROR(EIO); | ||||
| 				break; | ||||
| 			} | ||||
| 			if (error) | ||||
| 			if (error) { | ||||
| 				xlog_recover_free_trans(trans); | ||||
| 				return error; | ||||
| 			} | ||||
| 		} | ||||
| 		dp += be32_to_cpu(ohead->oh_len); | ||||
| 		num_logops--; | ||||
|  | ||||
| @ -1222,16 +1222,18 @@ xfs_qm_dqiterate( | ||||
| 	lblkno = 0; | ||||
| 	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes); | ||||
| 	do { | ||||
| 		uint		lock_mode; | ||||
| 
 | ||||
| 		nmaps = XFS_DQITER_MAP_SIZE; | ||||
| 		/*
 | ||||
| 		 * We aren't changing the inode itself. Just changing | ||||
| 		 * some of its data. No new blocks are added here, and | ||||
| 		 * the inode is never added to the transaction. | ||||
| 		 */ | ||||
| 		xfs_ilock(qip, XFS_ILOCK_SHARED); | ||||
| 		lock_mode = xfs_ilock_data_map_shared(qip); | ||||
| 		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno, | ||||
| 				       map, &nmaps, 0); | ||||
| 		xfs_iunlock(qip, XFS_ILOCK_SHARED); | ||||
| 		xfs_iunlock(qip, lock_mode); | ||||
| 		if (error) | ||||
| 			break; | ||||
| 
 | ||||
|  | ||||
| @ -20,12 +20,28 @@ | ||||
| 
 | ||||
| #include "xfs_dquot_item.h" | ||||
| #include "xfs_dquot.h" | ||||
| #include "xfs_quota_priv.h" | ||||
| 
 | ||||
| struct xfs_inode; | ||||
| 
 | ||||
| extern struct kmem_zone	*xfs_qm_dqtrxzone; | ||||
| 
 | ||||
| /*
 | ||||
|  * Number of bmaps that we ask from bmapi when doing a quotacheck. | ||||
|  * We make this restriction to keep the memory usage to a minimum. | ||||
|  */ | ||||
| #define XFS_DQITER_MAP_SIZE	10 | ||||
| 
 | ||||
| #define XFS_IS_DQUOT_UNINITIALIZED(dqp) ( \ | ||||
| 	!dqp->q_core.d_blk_hardlimit && \ | ||||
| 	!dqp->q_core.d_blk_softlimit && \ | ||||
| 	!dqp->q_core.d_rtb_hardlimit && \ | ||||
| 	!dqp->q_core.d_rtb_softlimit && \ | ||||
| 	!dqp->q_core.d_ino_hardlimit && \ | ||||
| 	!dqp->q_core.d_ino_softlimit && \ | ||||
| 	!dqp->q_core.d_bcount && \ | ||||
| 	!dqp->q_core.d_rtbcount && \ | ||||
| 	!dqp->q_core.d_icount) | ||||
| 
 | ||||
| /*
 | ||||
|  * This defines the unit of allocation of dquots. | ||||
|  * Currently, it is just one file system block, and a 4K blk contains 30 | ||||
|  | ||||
| @ -278,7 +278,7 @@ xfs_qm_scall_trunc_qfiles( | ||||
| 	xfs_mount_t	*mp, | ||||
| 	uint		flags) | ||||
| { | ||||
| 	int		error = 0, error2 = 0; | ||||
| 	int		error; | ||||
| 
 | ||||
| 	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) { | ||||
| 		xfs_debug(mp, "%s: flags=%x m_qflags=%x", | ||||
| @ -286,14 +286,20 @@ xfs_qm_scall_trunc_qfiles( | ||||
| 		return XFS_ERROR(EINVAL); | ||||
| 	} | ||||
| 
 | ||||
| 	if (flags & XFS_DQ_USER) | ||||
| 	if (flags & XFS_DQ_USER) { | ||||
| 		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino); | ||||
| 	if (flags & XFS_DQ_GROUP) | ||||
| 		error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino); | ||||
| 		if (error) | ||||
| 			return error; | ||||
| 	} | ||||
| 	if (flags & XFS_DQ_GROUP) { | ||||
| 		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino); | ||||
| 		if (error) | ||||
| 			return error; | ||||
| 	} | ||||
| 	if (flags & XFS_DQ_PROJ) | ||||
| 		error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino); | ||||
| 		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino); | ||||
| 
 | ||||
| 	return error ? error : error2; | ||||
| 	return error; | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  | ||||
| @ -1,42 +0,0 @@ | ||||
| /*
 | ||||
|  * Copyright (c) 2000-2003 Silicon Graphics, Inc. | ||||
|  * All Rights Reserved. | ||||
|  * | ||||
|  * This program is free software; you can redistribute it and/or | ||||
|  * modify it under the terms of the GNU General Public License as | ||||
|  * published by the Free Software Foundation. | ||||
|  * | ||||
|  * This program is distributed in the hope that it would be useful, | ||||
|  * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|  * GNU General Public License for more details. | ||||
|  * | ||||
|  * You should have received a copy of the GNU General Public License | ||||
|  * along with this program; if not, write the Free Software Foundation, | ||||
|  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA | ||||
|  */ | ||||
| #ifndef __XFS_QUOTA_PRIV_H__ | ||||
| #define __XFS_QUOTA_PRIV_H__ | ||||
| 
 | ||||
| /*
 | ||||
|  * Number of bmaps that we ask from bmapi when doing a quotacheck. | ||||
|  * We make this restriction to keep the memory usage to a minimum. | ||||
|  */ | ||||
| #define XFS_DQITER_MAP_SIZE	10 | ||||
| 
 | ||||
| #define XFS_IS_DQUOT_UNINITIALIZED(dqp) ( \ | ||||
| 	!dqp->q_core.d_blk_hardlimit && \ | ||||
| 	!dqp->q_core.d_blk_softlimit && \ | ||||
| 	!dqp->q_core.d_rtb_hardlimit && \ | ||||
| 	!dqp->q_core.d_rtb_softlimit && \ | ||||
| 	!dqp->q_core.d_ino_hardlimit && \ | ||||
| 	!dqp->q_core.d_ino_softlimit && \ | ||||
| 	!dqp->q_core.d_bcount && \ | ||||
| 	!dqp->q_core.d_rtbcount && \ | ||||
| 	!dqp->q_core.d_icount) | ||||
| 
 | ||||
| #define DQFLAGTO_TYPESTR(d)	(((d)->dq_flags & XFS_DQ_USER) ? "USR" : \ | ||||
| 				 (((d)->dq_flags & XFS_DQ_GROUP) ? "GRP" : \ | ||||
| 				 (((d)->dq_flags & XFS_DQ_PROJ) ? "PRJ":"???"))) | ||||
| 
 | ||||
| #endif	/* __XFS_QUOTA_PRIV_H__ */ | ||||
| @ -64,7 +64,7 @@ typedef struct xfs_log_item { | ||||
| 
 | ||||
| struct xfs_item_ops { | ||||
| 	void (*iop_size)(xfs_log_item_t *, int *, int *); | ||||
| 	void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *); | ||||
| 	void (*iop_format)(xfs_log_item_t *, struct xfs_log_vec *); | ||||
| 	void (*iop_pin)(xfs_log_item_t *); | ||||
| 	void (*iop_unpin)(xfs_log_item_t *, int remove); | ||||
| 	uint (*iop_push)(struct xfs_log_item *, struct list_head *); | ||||
|  | ||||
| @ -295,8 +295,8 @@ xfs_trans_mod_dquot( | ||||
| /*
 | ||||
|  * Given an array of dqtrx structures, lock all the dquots associated and join | ||||
|  * them to the transaction, provided they have been modified.  We know that the | ||||
|  * highest number of dquots of one type - usr, grp OR prj - involved in a | ||||
|  * transaction is 2 so we don't need to make this very generic. | ||||
|  * highest number of dquots of one type - usr, grp and prj - involved in a | ||||
|  * transaction is 3 so we don't need to make this very generic. | ||||
|  */ | ||||
| STATIC void | ||||
| xfs_trans_dqlockedjoin( | ||||
|  | ||||
| @ -174,7 +174,7 @@ xfs_calc_itruncate_reservation( | ||||
| 		    xfs_calc_buf_res(5, 0) + | ||||
| 		    xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1), | ||||
| 				     XFS_FSB_TO_B(mp, 1)) + | ||||
| 		    xfs_calc_buf_res(2 + XFS_IALLOC_BLOCKS(mp) + | ||||
| 		    xfs_calc_buf_res(2 + mp->m_ialloc_blks + | ||||
| 				     mp->m_in_maxlevels, 0))); | ||||
| } | ||||
| 
 | ||||
| @ -282,7 +282,7 @@ xfs_calc_create_resv_modify( | ||||
|  * For create we can allocate some inodes giving: | ||||
|  *    the agi and agf of the ag getting the new inodes: 2 * sectorsize | ||||
|  *    the superblock for the nlink flag: sector size | ||||
|  *    the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize | ||||
|  *    the inode blocks allocated: mp->m_ialloc_blks * blocksize | ||||
|  *    the inode btree: max depth * blocksize | ||||
|  *    the allocation btrees: 2 trees * (max depth - 1) * block size | ||||
|  */ | ||||
| @ -292,7 +292,7 @@ xfs_calc_create_resv_alloc( | ||||
| { | ||||
| 	return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) + | ||||
| 		mp->m_sb.sb_sectsize + | ||||
| 		xfs_calc_buf_res(XFS_IALLOC_BLOCKS(mp), XFS_FSB_TO_B(mp, 1)) + | ||||
| 		xfs_calc_buf_res(mp->m_ialloc_blks, XFS_FSB_TO_B(mp, 1)) + | ||||
| 		xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) + | ||||
| 		xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1), | ||||
| 				 XFS_FSB_TO_B(mp, 1)); | ||||
| @ -385,9 +385,9 @@ xfs_calc_ifree_reservation( | ||||
| 		xfs_calc_inode_res(mp, 1) + | ||||
| 		xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) + | ||||
| 		xfs_calc_buf_res(1, XFS_FSB_TO_B(mp, 1)) + | ||||
| 		max_t(uint, XFS_FSB_TO_B(mp, 1), XFS_INODE_CLUSTER_SIZE(mp)) + | ||||
| 		max_t(uint, XFS_FSB_TO_B(mp, 1), mp->m_inode_cluster_size) + | ||||
| 		xfs_calc_buf_res(1, 0) + | ||||
| 		xfs_calc_buf_res(2 + XFS_IALLOC_BLOCKS(mp) + | ||||
| 		xfs_calc_buf_res(2 + mp->m_ialloc_blks + | ||||
| 				 mp->m_in_maxlevels, 0) + | ||||
| 		xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1), | ||||
| 				 XFS_FSB_TO_B(mp, 1)); | ||||
|  | ||||
| @ -47,7 +47,7 @@ | ||||
| #define	XFS_DIRREMOVE_SPACE_RES(mp)	\ | ||||
| 	XFS_DAREMOVE_SPACE_RES(mp, XFS_DATA_FORK) | ||||
| #define	XFS_IALLOC_SPACE_RES(mp)	\ | ||||
| 	(XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels - 1) | ||||
| 	((mp)->m_ialloc_blks + (mp)->m_in_maxlevels - 1) | ||||
| 
 | ||||
| /*
 | ||||
|  * Space reservation values for various transactions. | ||||
|  | ||||
@@ -34,15 +34,6 @@ struct attrlist_cursor_kern;
	{ IO_ISDIRECT,	"DIRECT" }, \
	{ IO_INVIS,	"INVIS"}

/*
 * Flush/Invalidate options for vop_toss/flush/flushinval_pages.
 */
#define FI_NONE			0	/* none */
#define FI_REMAPF		1	/* Do a remapf prior to the operation */
#define FI_REMAPF_LOCKED	2	/* Do a remapf prior to the operation.
					   Prevent VM access to the pages until
					   the operation completes. */

/*
 * Some useful predicates.
 */