// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_rmap_btree.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"
#include "xfs_refcount.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because
 * the bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	return (XFS_IS_REALTIME_INODE(ip) ? \
		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
	struct xfs_inode	*ip,
	xfs_fsblock_t		start_fsb,
	xfs_off_t		count_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
	sector_t		block = XFS_BB_TO_FSBT(mp, sector);

	return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
		block << (mp->m_super->s_blocksize_bits - 9),
		count_fsb << (mp->m_super->s_blocksize_bits - 9),
		GFP_NOFS, 0);
}

#ifdef CONFIG_XFS_RT
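/*
 * Realtime allocation policy for bmapi writes: align the request to the
 * realtime extent size, pick a starting realtime extent, and allocate from
 * the realtime bitmap via xfs_rtallocate_extent().
 */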
int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	int		error;		/* error return value */
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_extlen_t	prod = 0;	/* product factor for allocators */
	xfs_extlen_t	mod = 0;	/* offset/length alignment remainder */
	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
	xfs_extlen_t	align;		/* minimum allocation alignment */
	xfs_rtblock_t	rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	div_u64_rem(ap->offset, align, &mod);
	if (mod || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

	/*
	 * Lock out modifications to both the RT bitmap and summary inodes
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
				&ralen, ap->wasdel, prod, &rtb);
	if (error)
		return error;

	ap->blkno = rtb;
	if (ap->blkno != NULLFSBLOCK) {
		ap->blkno *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->length = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);

		/* Zero the extent if we were asked to do so */
		if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
			error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
			if (error)
				return error;
		}
	} else {
		ap->length = 0;
	}
	return 0;
}
#endif /* CONFIG_XFS_RT */

/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary.  All offsets are considered outside
 * the end of file for an empty fork, so 1 is returned in *eof in that case.
 */
int
xfs_bmap_eof(
	struct xfs_inode	*ip,
	xfs_fileoff_t		endoff,
	int			whichfork,
	int			*eof)
{
	struct xfs_bmbt_irec	rec;
	int			error;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
	if (error || *eof)
		return error;

	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.  Delayed allocation
 * extents are not counted towards the totals.
 */
xfs_extnum_t
xfs_bmap_count_leaves(
	struct xfs_ifork	*ifp,
	xfs_filblks_t		*count)
{
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		numrecs = 0;

	for_each_xfs_iext(ifp, &icur, &got) {
		if (!isnullstartblock(got.br_startblock)) {
			*count += got.br_blockcount;
			numrecs++;
		}
	}

	return numrecs;
}

/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	int			numrecs,
	xfs_filblks_t		*count)
{
	int			b;
	xfs_bmbt_rec_t		*frp;

	for (b = 1; b <= numrecs; b++) {
		frp = XFS_BMBT_REC_ADDR(mp, block, b);
		*count += xfs_bmbt_disk_get_blockcount(frp);
	}
}

/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
STATIC int
xfs_bmap_count_tree(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_ifork	*ifp,
	xfs_fsblock_t		blockno,
	int			levelin,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	int			error;
	struct xfs_buf		*bp, *nbp;
	int			level = levelin;
	__be64			*pp;
	xfs_fsblock_t		bno = blockno;
	xfs_fsblock_t		nextbno;
	struct xfs_btree_block	*block, *nextblock;
	int			numrecs;

	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
	if (error)
		return error;
	*count += 1;
	block = XFS_BUF_TO_BLOCK(bp);

	if (--level) {
		/* Not at node above leaves, count this level of nodes */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		while (nextbno != NULLFSBLOCK) {
			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			nextblock = XFS_BUF_TO_BLOCK(nbp);
			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
			xfs_trans_brelse(tp, nbp);
		}

		/* Dive to the next level */
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level, nextents,
				count);
		if (error) {
			xfs_trans_brelse(tp, bp);
			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
					 XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		xfs_trans_brelse(tp, bp);
	} else {
		/* count all level 1 nodes and their leaves */
		for (;;) {
			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
			numrecs = be16_to_cpu(block->bb_numrecs);
			(*nextents) += numrecs;
			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
			xfs_trans_brelse(tp, bp);
			if (nextbno == NULLFSBLOCK)
				break;
			bno = nextbno;
			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			block = XFS_BUF_TO_BLOCK(bp);
		}
	}
	return 0;
}

/*
 * Count fsblocks of the given fork.  Delayed allocation extents are
 * not counted towards the totals.
 */
int
xfs_bmap_count_blocks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_mount	*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	struct xfs_btree_block	*block;	/* current btree block */
	struct xfs_ifork	*ifp;	/* fork structure */
	xfs_fsblock_t		bno;	/* block # of "block" */
	int			level;	/* btree level, for checking */
	int			error;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	*nextents = 0;
	*count = 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!ifp)
		return 0;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_EXTENTS:
		*nextents = xfs_bmap_count_leaves(ifp, count);
		return 0;
	case XFS_DINODE_FMT_BTREE:
		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			error = xfs_iread_extents(tp, ip, whichfork);
			if (error)
				return error;
		}

		/*
		 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
		 */
		block = ifp->if_broot;
		level = be16_to_cpu(block->bb_level);
		ASSERT(level > 0);
		pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
		bno = be64_to_cpu(*pp);
		ASSERT(bno != NULLFSBLOCK);
		ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
		ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level,
				nextents, count);
		if (error) {
			XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)",
					XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	return 0;
}

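/*
 * Fill in one getbmap record in @out from the in-core extent @got and advance
 * the request window in @bmv past it.
 */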
static int
xfs_getbmap_report_one(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	struct xfs_bmbt_irec	*got)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;
	bool			shared = false;
	int			error;

	error = xfs_reflink_trim_around_shared(ip, got, &shared);
	if (error)
		return error;

	if (isnullstartblock(got->br_startblock) ||
	    got->br_startblock == DELAYSTARTBLOCK) {
		/*
		 * Delalloc extents that start beyond EOF can occur due to
		 * speculative EOF allocation when the delalloc extent is larger
		 * than the largest freespace extent at conversion time. These
		 * extents cannot be converted by data writeback, so can exist
		 * here even if we are not supposed to be finding delalloc
		 * extents.
		 */
		if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
			ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);

		p->bmv_oflags |= BMV_OF_DELALLOC;
		p->bmv_block = -2;
	} else {
		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
	}

	if (got->br_state == XFS_EXT_UNWRITTEN &&
	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
		p->bmv_oflags |= BMV_OF_PREALLOC;

	if (shared)
		p->bmv_oflags |= BMV_OF_SHARED;

	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
	return 0;
}

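/* Emit a getbmap record for a hole, unless the caller asked to skip holes. */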
static void
xfs_getbmap_report_hole(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	xfs_fileoff_t		bno,
	xfs_fileoff_t		end)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;

	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
		return;

	p->bmv_block = -1;
	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
}

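/* Have we filled the output array or exhausted the requested length? */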
static inline bool
xfs_getbmap_full(
	struct getbmapx		*bmv)
{
	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
}

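/*
 * Advance @rec past the part that has just been reported and resize it to
 * cover the remainder of the original extent, which ends at @total_end.
 * Returns false once the whole extent has been consumed.
 */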
static bool
xfs_getbmap_next_rec(
	struct xfs_bmbt_irec	*rec,
	xfs_fileoff_t		total_end)
{
	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;

	if (end == total_end)
		return false;

	rec->br_startoff += rec->br_blockcount;
	if (!isnullstartblock(rec->br_startblock) &&
	    rec->br_startblock != DELAYSTARTBLOCK)
		rec->br_startblock += rec->br_blockcount;
	rec->br_blockcount = total_end - end;
	return true;
}

/*
 * Get inode's extents as described in bmv, and format for output.  Fills the
 * out array until all extents in the requested range are mapped, or until the
 * passed-in bmv->bmv_count slots have been filled.
 */
int						/* error code */
xfs_getbmap(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	struct kgetbmap		*out)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			iflags = bmv->bmv_iflags;
	int			whichfork, lock, error = 0;
	int64_t			bmv_end, max_len;
	xfs_fileoff_t		bno, first_bno;
	struct xfs_ifork	*ifp;
	struct xfs_bmbt_irec	got, rec;
	xfs_filblks_t		len;
	struct xfs_iext_cursor	icur;

	if (bmv->bmv_iflags & ~BMV_IF_VALID)
		return -EINVAL;
#ifndef DEBUG
	/* Only allow CoW fork queries if we're debugging. */
	if (iflags & BMV_IF_COWFORK)
		return -EINVAL;
#endif
	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
		return -EINVAL;

	if (bmv->bmv_length < -1)
		return -EINVAL;
	bmv->bmv_entries = 0;
	if (bmv->bmv_length == 0)
		return 0;

	if (iflags & BMV_IF_ATTRFORK)
		whichfork = XFS_ATTR_FORK;
	else if (iflags & BMV_IF_COWFORK)
		whichfork = XFS_COW_FORK;
	else
		whichfork = XFS_DATA_FORK;
	ifp = XFS_IFORK_PTR(ip, whichfork);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	switch (whichfork) {
	case XFS_ATTR_FORK:
		if (!XFS_IFORK_Q(ip))
			goto out_unlock_iolock;

		max_len = 1LL << 32;
		lock = xfs_ilock_attr_map_shared(ip);
		break;
	case XFS_COW_FORK:
		/* No CoW fork? Just return */
		if (!ifp)
			goto out_unlock_iolock;

		if (xfs_get_cowextsz_hint(ip))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);

		lock = XFS_ILOCK_SHARED;
		xfs_ilock(ip, lock);
		break;
	case XFS_DATA_FORK:
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation. These are not removed
			 * until the release function is called or the inode
			 * is inactivated. Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		if (xfs_get_extsz_hint(ip) ||
		    (ip->i_d.di_flags &
		     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);

		lock = xfs_ilock_data_map_shared(ip);
		break;
	}

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		break;
	case XFS_DINODE_FMT_LOCAL:
		/* Local format inode forks report no extents. */
		goto out_unlock_ilock;
	default:
		error = -EINVAL;
		goto out_unlock_ilock;
	}

	if (bmv->bmv_length == -1) {
		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
	}

	bmv_end = bmv->bmv_offset + bmv->bmv_length;

	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, whichfork);
		if (error)
			goto out_unlock_ilock;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
		/*
		 * Report a whole-file hole if the delalloc flag is set to
		 * stay compatible with the old implementation.
		 */
		if (iflags & BMV_IF_DELALLOC)
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		goto out_unlock_ilock;
	}

	while (!xfs_getbmap_full(bmv)) {
		xfs_trim_extent(&got, first_bno, len);

		/*
		 * Report an entry for a hole if this extent doesn't directly
		 * follow the previous one.
		 */
		if (got.br_startoff > bno) {
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					got.br_startoff);
			if (xfs_getbmap_full(bmv))
				break;
		}

		/*
		 * In order to report shared extents accurately, we report each
		 * distinct shared / unshared part of a single bmbt record with
		 * an individual getbmapx record.
		 */
		bno = got.br_startoff + got.br_blockcount;
		rec = got;
		do {
			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
					&rec);
			if (error || xfs_getbmap_full(bmv))
				goto out_unlock_ilock;
		} while (xfs_getbmap_next_rec(&rec, bno));

		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));

			out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;

			if (whichfork != XFS_ATTR_FORK && bno < end &&
			    !xfs_getbmap_full(bmv)) {
				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
						bno, end);
			}
			break;
		}

		if (bno >= first_bno + len)
			break;
	}

out_unlock_ilock:
	xfs_iunlock(ip, lock);
out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return error;
}

/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode.  This will always punch out both the start and end blocks, even
 * if the ranges only partially overlap them, so it is up to the caller to
 * ensure that partial blocks are not passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		length)
{
	struct xfs_ifork	*ifp = &ip->i_df;
	xfs_fileoff_t		end_fsb = start_fsb + length;
	struct xfs_bmbt_irec	got, del;
	struct xfs_iext_cursor	icur;
	int			error = 0;

	ASSERT(ifp->if_flags & XFS_IFEXTENTS);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
		goto out_unlock;

	while (got.br_startoff + got.br_blockcount > start_fsb) {
		del = got;
		xfs_trim_extent(&del, start_fsb, length);

		/*
		 * A delete can push the cursor forward. Step back to the
		 * previous extent on non-delalloc or extents outside the
		 * target range.
		 */
		if (!del.br_blockcount ||
		    !isnullstartblock(del.br_startblock)) {
			if (!xfs_iext_prev_extent(ifp, &icur, &got))
				break;
			continue;
		}

		error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
						  &got, &del);
		if (error || !xfs_iext_get_extent(ifp, &icur, &got))
			break;
	}

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(VFS_I(ip)->i_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and delalloc blocks will not
	 * have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VFS_I(ip)->i_mapping->nrpages == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}

/*
 * This is called to free any blocks beyond eof. The caller must hold
 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 * reference to the inode.
 */
int
xfs_free_eofblocks(
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		end_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_filblks_t		map_len;
	int			nimaps;
	struct xfs_bmbt_irec	imap;
	struct xfs_mount	*mp = ip->i_mount;

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file.  If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return 0;
	map_len = last_fsb - end_fsb;

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If there are blocks after the end of file, truncate the file to its
	 * current size to free them up.
	 */
	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK ||
	     ip->i_delayed_blks)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		error = xfs_qm_dqattach(ip);
		if (error)
			return error;

		/* wait on dio to ensure i_size has settled */
		inode_dio_wait(VFS_I(ip));

		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
				&tp);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Do not update the on-disk file size.  If we update the
		 * on-disk file size and then the system crashes before the
		 * contents of the file are flushed to disk then the files
		 * may be full of holes (ie NULL files bug).
		 */
		error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
					XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
		if (error) {
			/*
			 * If we get an error at this point we simply don't
			 * bother truncating the file.
			 */
			xfs_trans_cancel(tp);
		} else {
			error = xfs_trans_commit(tp);
			if (!error)
				xfs_inode_clear_eofblocks_tag(ip);
		}

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	return error;
}

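/*
 * Allocate file space for the byte range [@offset, @offset + @len), working
 * in chunks of at most MAXEXTLEN blocks per transaction.  @alloc_type carries
 * xfs_bmapi_write() flags such as XFS_BMAPI_PREALLOC.
 */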
int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	int			nimaps;
	int			quota_flag;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	uint			qblocks, resblks, resrtextents;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
	allocatesize_fsb = XFS_B_TO_FSB(mp, count);

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			div_u64_rem(startoffset_fsb, extsz, &temp);
			if (temp)
				e += temp;
			div_u64_rem(e, extsz, &temp);
			if (temp)
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
		if (unlikely(rt)) {
			resrtextents = qblocks = resblks;
			resrtextents /= mp->m_sb.sb_rextsize;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			quota_flag = XFS_QMOPT_RES_RTBLKS;
		} else {
			resrtextents = 0;
			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			quota_flag = XFS_QMOPT_RES_REGBLKS;
		}

		/*
		 * Allocate and setup the transaction.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
				resrtextents, 0, &tp);

		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
						      0, quota_flag);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
					allocatesize_fsb, alloc_type, resblks,
					imapp, &nimaps);
		if (error)
			goto error0;

		/*
		 * Complete the transaction
		 */
		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			break;

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = -ENOSPC;
			break;
		}

		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;

error0:	/* unlock inode, unreserve quota blocks, cancel trans */
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

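/*
 * Unmap up to @len_fsb blocks starting at @startoffset_fsb in a single
 * transaction, setting *done once the whole range has been unmapped.
 */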
static int
xfs_unmap_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		startoffset_fsb,
	xfs_filblks_t		len_fsb,
	int			*done)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error) {
		ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
			ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

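/*
 * Wait for any pending direct I/O, then write back and invalidate the page
 * cache over the byte range that is about to be unmapped, rounded out to
 * block and page alignment.
 */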
int
xfs_flush_unmap_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	xfs_off_t		rounding, start, end;
	int			error;

	/* wait for the completion of any pending DIOs */
	inode_dio_wait(inode);

	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
	start = round_down(offset, rounding);
	end = round_up(offset + len, rounding) - 1;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;
	truncate_pagecache_range(inode, start, end);
	return 0;
}

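/*
 * Punch out all the full blocks in the byte range [@offset, @offset + @len)
 * and zero whatever partial blocks remain at either end, without extending
 * i_size.
 */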
int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			done = 0, error;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)	/* if nothing being freed */
		return 0;

	error = xfs_flush_unmap_range(ip, offset, len);
	if (error)
		return error;

	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/*
	 * Need to zero the stuff we're not freeing, on disk.
	 */
	if (endoffset_fsb > startoffset_fsb) {
		while (!done) {
			error = xfs_unmap_extent(ip, startoffset_fsb,
					endoffset_fsb - startoffset_fsb, &done);
			if (error)
				return error;
		}
	}

	/*
	 * Now that we've unmapped all full blocks we'll have to zero out any
	 * partial block at the beginning and/or end.  iomap_zero_range is smart
	 * enough to skip any holes, including those we just created, but we
	 * must take care not to zero beyond EOF and enlarge i_size.
	 */
	if (offset >= XFS_ISIZE(ip))
		return 0;
	if (offset + len > XFS_ISIZE(ip))
		len = XFS_ISIZE(ip) - offset;
	error = iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
	if (error)
		return error;

	/*
	 * If we zeroed right up to EOF and EOF straddles a page boundary we
	 * must make sure that the post-EOF area is also zeroed because the
	 * page could be mmap'd and iomap_zero_range doesn't do that for us.
	 * Writeback of the eof page will do this, albeit clumsily.
	 */
	if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
				round_down(offset + len, PAGE_SIZE), LLONG_MAX);
	}

	return error;
}

/*
 * Preallocate and zero a range of a file. This mechanism has the allocation
 * semantics of fallocate and in addition converts data in the range to zeroes.
 */
int
xfs_zero_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			blksize;
	int			error;

	trace_xfs_zero_file_space(ip);

2014-10-29 23:35:11 +00:00
|
|
|
blksize = 1 << mp->m_sb.sb_blocklog;
|
2013-08-12 10:49:45 +00:00
|
|
|
|
|
|
|
/*
|
2014-10-29 23:35:11 +00:00
|
|
|
* Punch a hole and prealloc the range. We use hole punch rather than
|
|
|
|
* unwritten extent conversion for two reasons:
|
|
|
|
*
|
|
|
|
* 1.) Hole punch handles partial block zeroing for us.
|
|
|
|
*
|
|
|
|
* 2.) If prealloc returns ENOSPC, the file range is still zero-valued
|
|
|
|
* by virtue of the hole punch.
|
2013-08-12 10:49:45 +00:00
|
|
|
*/
|
2014-10-29 23:35:11 +00:00
|
|
|
error = xfs_free_file_space(ip, offset, len);
|
2019-02-18 17:38:49 +00:00
|
|
|
if (error || xfs_is_always_cow_inode(ip))
|
|
|
|
return error;
|
2013-08-12 10:49:45 +00:00
|
|
|
|
2019-02-18 17:38:49 +00:00
|
|
|
return xfs_alloc_file_space(ip, round_down(offset, blksize),
|
2014-10-29 23:35:11 +00:00
|
|
|
round_up(offset + len, blksize) -
|
|
|
|
round_down(offset, blksize),
|
|
|
|
XFS_BMAPI_PREALLOC);
|
2013-08-12 10:49:45 +00:00
|
|
|
}
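/*
 * Worked example of the alignment above (illustrative only, assuming a
 * 4096 byte filesystem block size): zeroing offset 6000, length 3000
 * first punches out bytes [6000, 9000), which also zeroes the partial
 * blocks at either end, and then preallocates the covering aligned
 * range from round_down(6000, 4096) = 4096 up to
 * round_up(9000, 4096) = 12288, i.e. two unwritten blocks that read
 * back as zeroes. Even if that preallocation fails with ENOSPC, the
 * range already reads as zeroes by virtue of the hole punch.
 */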
|
|
|
|
|
2015-04-13 01:25:04 +00:00
|
|
|
static int
|
2017-10-19 18:07:10 +00:00
|
|
|
xfs_prepare_shift(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
loff_t offset)
|
2014-02-23 23:58:19 +00:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
|
2014-09-23 05:39:05 +00:00
|
|
|
/*
|
|
|
|
* Trim eofblocks to avoid shifting uninitialized post-eof preallocation
|
|
|
|
* into the accessible region of the file.
|
|
|
|
*/
|
2014-09-02 02:12:53 +00:00
|
|
|
if (xfs_can_free_eofblocks(ip, true)) {
|
2017-01-28 07:22:55 +00:00
|
|
|
error = xfs_free_eofblocks(ip);
|
2014-09-02 02:12:53 +00:00
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
}
|
2014-09-02 02:12:53 +00:00
|
|
|
|
2014-09-23 05:39:05 +00:00
|
|
|
/*
|
|
|
|
* Write back and invalidate the cache for the remainder of the file as we're
|
2015-03-25 04:08:56 +00:00
|
|
|
* about to shift down every extent from offset to EOF.
|
2014-09-23 05:39:05 +00:00
|
|
|
*/
|
2018-11-19 21:31:09 +00:00
|
|
|
error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
|
2014-02-23 23:58:19 +00:00
|
|
|
|
2015-03-25 04:08:56 +00:00
|
|
|
/*
|
2017-09-18 16:41:17 +00:00
|
|
|
* Clean out anything hanging around in the cow fork now that
|
|
|
|
* we've flushed all the dirty data out to disk to avoid having
|
|
|
|
* CoW extents at the wrong offsets.
|
|
|
|
*/
|
2018-07-17 23:51:51 +00:00
|
|
|
if (xfs_inode_has_cow_data(ip)) {
|
2017-09-18 16:41:17 +00:00
|
|
|
error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
|
|
|
|
true);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2017-10-19 18:07:10 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* xfs_collapse_file_space()
|
|
|
|
* This routine frees disk space and shifts extents for the given file.
|
|
|
|
* The first thing we do is to free data blocks in the specified range
|
|
|
|
* by calling xfs_free_file_space(), which also syncs dirty data
|
|
|
|
* and invalidates the page cache over the region on which collapse range
|
|
|
|
* is working. Extent records are then shifted to the left to cover the hole.
|
|
|
|
* RETURNS:
|
|
|
|
* 0 on success
|
|
|
|
* errno on error
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
xfs_collapse_file_space(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
xfs_off_t offset,
|
|
|
|
xfs_off_t len)
|
|
|
|
{
|
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
|
|
|
struct xfs_trans *tp;
|
|
|
|
int error;
|
|
|
|
xfs_fileoff_t next_fsb = XFS_B_TO_FSB(mp, offset + len);
|
|
|
|
xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
|
|
|
|
uint resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
|
2017-10-19 18:07:11 +00:00
|
|
|
bool done = false;
|
2017-10-19 18:07:10 +00:00
|
|
|
|
|
|
|
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
|
2017-10-23 23:32:38 +00:00
|
|
|
ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
|
|
|
|
|
2017-10-19 18:07:10 +00:00
|
|
|
trace_xfs_collapse_file_space(ip);
|
|
|
|
|
|
|
|
error = xfs_free_file_space(ip, offset, len);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
|
|
|
error = xfs_prepare_shift(ip, offset);
|
|
|
|
if (error)
|
|
|
|
return error;
|
2015-03-25 04:08:56 +00:00
|
|
|
|
2014-02-23 23:58:19 +00:00
|
|
|
while (!error && !done) {
|
2017-02-15 18:18:10 +00:00
|
|
|
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
|
|
|
|
&tp);
|
2016-04-05 23:19:55 +00:00
|
|
|
if (error)
|
2014-02-23 23:58:19 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
|
|
|
error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
|
2017-02-15 18:18:10 +00:00
|
|
|
ip->i_gdquot, ip->i_pdquot, resblks, 0,
|
2014-02-23 23:58:19 +00:00
|
|
|
XFS_QMOPT_RES_REGBLKS);
|
|
|
|
if (error)
|
2015-08-19 00:01:40 +00:00
|
|
|
goto out_trans_cancel;
|
2015-03-25 04:08:56 +00:00
|
|
|
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
|
2014-02-23 23:58:19 +00:00
|
|
|
|
2017-10-19 18:07:11 +00:00
|
|
|
error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
|
2018-07-12 05:26:27 +00:00
|
|
|
&done);
|
2014-02-23 23:58:19 +00:00
|
|
|
if (error)
|
2018-07-24 20:43:13 +00:00
|
|
|
goto out_trans_cancel;
|
2014-02-23 23:58:19 +00:00
|
|
|
|
2015-06-04 03:48:08 +00:00
|
|
|
error = xfs_trans_commit(tp);
|
2014-02-23 23:58:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return error;
|
|
|
|
|
2015-08-19 00:01:40 +00:00
|
|
|
out_trans_cancel:
|
2015-06-04 03:47:56 +00:00
|
|
|
xfs_trans_cancel(tp);
|
2014-02-23 23:58:19 +00:00
|
|
|
return error;
|
|
|
|
}
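/*
 * Illustrative example for the routine above (assuming 4096 byte
 * filesystem blocks): collapsing offset = 64k, len = 16k first frees
 * bytes [64k, 80k), then computes next_fsb = XFS_B_TO_FSB(mp, 80k) = 20
 * and shift_fsb = XFS_B_TO_FSB(mp, 16k) = 4, so every extent record
 * from block 20 onwards is shifted left by four blocks to close the
 * hole. Trimming i_size afterwards is left to the fallocate caller.
 */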
|
|
|
|
|
2015-03-25 04:08:56 +00:00
|
|
|
/*
|
|
|
|
* xfs_insert_file_space()
|
|
|
|
* This routine creates hole space by shifting extents for the given file.
|
|
|
|
* The first thing we do is to sync dirty data and invalidate page cache
|
|
|
|
* over the region on which insert range is working. We then split an extent
|
|
|
|
* into two at the given offset by calling xfs_bmap_split_extent, and
|
|
|
|
* shift all extent records lying between [offset,
|
|
|
|
* last allocated extent] to the right to make room for the hole.
|
|
|
|
* RETURNS:
|
|
|
|
* 0 on success
|
|
|
|
* errno on error
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
xfs_insert_file_space(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
loff_t offset,
|
|
|
|
loff_t len)
|
|
|
|
{
|
2017-10-19 18:07:10 +00:00
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
|
|
|
struct xfs_trans *tp;
|
|
|
|
int error;
|
|
|
|
xfs_fileoff_t stop_fsb = XFS_B_TO_FSB(mp, offset);
|
|
|
|
xfs_fileoff_t next_fsb = NULLFSBLOCK;
|
|
|
|
xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
|
2017-10-19 18:07:11 +00:00
|
|
|
bool done = false;
|
2017-10-19 18:07:10 +00:00
|
|
|
|
2015-03-25 04:08:56 +00:00
|
|
|
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
|
2017-10-23 23:32:38 +00:00
|
|
|
ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
|
|
|
|
|
2015-03-25 04:08:56 +00:00
|
|
|
trace_xfs_insert_file_space(ip);
|
|
|
|
|
2018-06-22 06:26:57 +00:00
|
|
|
error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
2017-10-19 18:07:10 +00:00
|
|
|
error = xfs_prepare_shift(ip, offset);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The extent shifting code works on extent granularity. So, if stop_fsb
|
|
|
|
* is not the starting block of extent, we need to split the extent at
|
|
|
|
* stop_fsb.
|
|
|
|
*/
|
|
|
|
error = xfs_bmap_split_extent(ip, stop_fsb);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
|
|
|
while (!error && !done) {
|
|
|
|
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0,
|
|
|
|
&tp);
|
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
|
|
|
|
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
|
|
|
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
|
2017-10-19 18:07:11 +00:00
|
|
|
error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
|
2018-07-12 05:26:27 +00:00
|
|
|
&done, stop_fsb);
|
2017-10-19 18:07:10 +00:00
|
|
|
if (error)
|
2018-07-24 20:43:13 +00:00
|
|
|
goto out_trans_cancel;
|
2017-10-19 18:07:10 +00:00
|
|
|
|
|
|
|
error = xfs_trans_commit(tp);
|
|
|
|
}
|
|
|
|
|
|
|
|
return error;
|
|
|
|
|
2018-07-24 20:43:13 +00:00
|
|
|
out_trans_cancel:
|
2017-10-19 18:07:10 +00:00
|
|
|
xfs_trans_cancel(tp);
|
|
|
|
return error;
|
2015-03-25 04:08:56 +00:00
|
|
|
}
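/*
 * Illustrative example for the routine above (assuming 4096 byte
 * filesystem blocks): inserting 16k at offset 64k computes
 * stop_fsb = 16 and shift_fsb = 4. Any extent straddling block 16 is
 * first split there, then the extent records are walked from the last
 * allocated extent back down to block 16 and shifted right by four
 * blocks, leaving a 16k hole at [64k, 80k).
 */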
|
|
|
|
|
2013-08-12 10:49:48 +00:00
|
|
|
/*
|
|
|
|
* We need to check that the format of the data fork in the temporary inode is
|
|
|
|
* valid for the target inode before doing the swap. This is not a problem with
|
|
|
|
* attr1 because of the fixed fork offset, but attr2 has a dynamically sized
|
|
|
|
* data fork depending on the space the attribute fork is taking so we can get
|
|
|
|
* invalid formats on the target inode.
|
|
|
|
*
|
|
|
|
* E.g. target has space for 7 extents in extent format, temp inode only has
|
|
|
|
* space for 6. If we defragment down to 7 extents, then the tmp format is a
|
|
|
|
* btree, but when swapped it needs to be in extent format. Hence we can't just
|
|
|
|
* blindly swap data forks on attr2 filesystems.
|
|
|
|
*
|
|
|
|
* Note that we check the swap in both directions so that we don't end up with
|
|
|
|
* a corrupt temporary inode, either.
|
|
|
|
*
|
|
|
|
* Note that fixing the way xfs_fsr sets up the attribute fork in the source
|
|
|
|
* inode will prevent this situation from occurring, so all we do here is
|
|
|
|
* reject and log the attempt. Basically, we are putting the responsibility on
|
|
|
|
* userspace to get this right.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
xfs_swap_extents_check_format(
|
2016-10-03 16:11:52 +00:00
|
|
|
struct xfs_inode *ip, /* target inode */
|
|
|
|
struct xfs_inode *tip) /* tmp inode */
|
2013-08-12 10:49:48 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
/* Should never get a local format */
|
|
|
|
if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
|
|
|
|
tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
|
2014-06-25 04:58:08 +00:00
|
|
|
return -EINVAL;
|
2013-08-12 10:49:48 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* if the target inode has fewer extents than the temporary inode then
|
|
|
|
* why did userspace call us?
|
|
|
|
*/
|
|
|
|
if (ip->i_d.di_nextents < tip->i_d.di_nextents)
|
2014-06-25 04:58:08 +00:00
|
|
|
return -EINVAL;
|
2013-08-12 10:49:48 +00:00
|
|
|
|
2016-10-03 16:11:53 +00:00
|
|
|
/*
|
|
|
|
* If we have to use the (expensive) rmap swap method, we can
|
|
|
|
* handle any number of extents and any format.
|
|
|
|
*/
|
|
|
|
if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
|
|
|
|
return 0;
|
|
|
|
|
2013-08-12 10:49:48 +00:00
|
|
|
/*
|
|
|
|
* if the target inode is in extent form and the temp inode is in btree
|
|
|
|
* form then we will end up with the target inode in the wrong format
|
|
|
|
* as we already know there are fewer extents in the temp inode.
|
|
|
|
*/
|
|
|
|
if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
|
|
|
|
tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
|
2014-06-25 04:58:08 +00:00
|
|
|
return -EINVAL;
|
2013-08-12 10:49:48 +00:00
|
|
|
|
|
|
|
/* Check temp in extent form to max in target */
|
|
|
|
if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
|
|
|
|
XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
|
|
|
|
XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
|
2014-06-25 04:58:08 +00:00
|
|
|
return -EINVAL;
|
2013-08-12 10:49:48 +00:00
|
|
|
|
|
|
|
/* Check target in extent form to max in temp */
|
|
|
|
if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
|
|
|
|
XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
|
|
|
|
XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
|
2014-06-25 04:58:08 +00:00
|
|
|
return -EINVAL;
|
2013-08-12 10:49:48 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If we are in a btree format, check that the temp root block will fit
|
|
|
|
* in the target and that it has enough extents to be in btree format
|
|
|
|
* in the target.
|
|
|
|
*
|
|
|
|
* Note that we have to be careful to allow btree->extent conversions
|
|
|
|
* (a common defrag case) which will occur when the temp inode is in
|
|
|
|
* extent format...
|
|
|
|
*/
|
|
|
|
if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
|
2017-06-15 04:35:34 +00:00
|
|
|
if (XFS_IFORK_Q(ip) &&
|
2013-08-12 10:49:48 +00:00
|
|
|
XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
|
2014-06-25 04:58:08 +00:00
|
|
|
return -EINVAL;
|
2013-08-12 10:49:48 +00:00
|
|
|
if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
|
|
|
|
XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
|
2014-06-25 04:58:08 +00:00
|
|
|
return -EINVAL;
|
2013-08-12 10:49:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Reciprocal target->temp btree format checks */
|
|
|
|
if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
|
2017-06-15 04:35:34 +00:00
|
|
|
if (XFS_IFORK_Q(tip) &&
|
2013-08-12 10:49:48 +00:00
|
|
|
XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
|
2014-06-25 04:58:08 +00:00
|
|
|
return -EINVAL;
|
2013-08-12 10:49:48 +00:00
|
|
|
if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
|
|
|
|
XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
|
2014-06-25 04:58:08 +00:00
|
|
|
return -EINVAL;
|
2013-08-12 10:49:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-09-23 06:20:11 +00:00
|
|
|
static int
|
2014-08-04 03:44:08 +00:00
|
|
|
xfs_swap_extent_flush(
|
|
|
|
struct xfs_inode *ip)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
|
|
|
|
error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
truncate_pagecache_range(VFS_I(ip), 0, -1);
|
|
|
|
|
|
|
|
/* Verify O_DIRECT for ftmp */
|
|
|
|
if (VFS_I(ip)->i_mapping->nrpages)
|
|
|
|
return -EINVAL;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-10-03 16:11:53 +00:00
|
|
|
/*
|
|
|
|
* Move extents from one file to another, when rmap is enabled.
|
|
|
|
*/
|
|
|
|
STATIC int
|
|
|
|
xfs_swap_extent_rmap(
|
|
|
|
struct xfs_trans **tpp,
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
struct xfs_inode *tip)
|
|
|
|
{
|
2018-07-12 05:26:17 +00:00
|
|
|
struct xfs_trans *tp = *tpp;
|
2016-10-03 16:11:53 +00:00
|
|
|
struct xfs_bmbt_irec irec;
|
|
|
|
struct xfs_bmbt_irec uirec;
|
|
|
|
struct xfs_bmbt_irec tirec;
|
|
|
|
xfs_fileoff_t offset_fsb;
|
|
|
|
xfs_fileoff_t end_fsb;
|
|
|
|
xfs_filblks_t count_fsb;
|
|
|
|
int error;
|
|
|
|
xfs_filblks_t ilen;
|
|
|
|
xfs_filblks_t rlen;
|
|
|
|
int nimaps;
|
2017-06-16 18:00:05 +00:00
|
|
|
uint64_t tip_flags2;
|
2016-10-03 16:11:53 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If the source file has shared blocks, we must flag the donor
|
|
|
|
* file as having shared blocks so that we get the shared-block
|
|
|
|
* rmap functions when we go to fix up the rmaps. The flags
|
|
|
|
* will be switched for real later.
|
|
|
|
*/
|
|
|
|
tip_flags2 = tip->i_d.di_flags2;
|
|
|
|
if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
|
|
|
|
tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
|
|
|
|
|
|
|
|
offset_fsb = 0;
|
|
|
|
end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
|
|
|
|
count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
|
|
|
|
|
|
|
|
while (count_fsb) {
|
|
|
|
/* Read extent from the donor file */
|
|
|
|
nimaps = 1;
|
|
|
|
error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
|
|
|
|
&nimaps, 0);
|
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
ASSERT(nimaps == 1);
|
|
|
|
ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
|
|
|
|
|
|
|
|
trace_xfs_swap_extent_rmap_remap(tip, &tirec);
|
|
|
|
ilen = tirec.br_blockcount;
|
|
|
|
|
|
|
|
/* Unmap the old blocks in the source file. */
|
|
|
|
while (tirec.br_blockcount) {
|
2018-07-24 20:43:13 +00:00
|
|
|
ASSERT(tp->t_firstblock == NULLFSBLOCK);
|
2016-10-03 16:11:53 +00:00
|
|
|
trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
|
|
|
|
|
|
|
|
/* Read extent from the source file */
|
|
|
|
nimaps = 1;
|
|
|
|
error = xfs_bmapi_read(ip, tirec.br_startoff,
|
|
|
|
tirec.br_blockcount, &irec,
|
|
|
|
&nimaps, 0);
|
|
|
|
if (error)
|
2018-09-29 03:41:58 +00:00
|
|
|
goto out;
|
2016-10-03 16:11:53 +00:00
|
|
|
ASSERT(nimaps == 1);
|
|
|
|
ASSERT(tirec.br_startoff == irec.br_startoff);
|
|
|
|
trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
|
|
|
|
|
|
|
|
/* Trim the extent. */
|
|
|
|
uirec = tirec;
|
|
|
|
uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
|
|
|
|
tirec.br_blockcount,
|
|
|
|
irec.br_blockcount);
|
|
|
|
trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
|
|
|
|
|
|
|
|
/* Remove the mapping from the donor file. */
|
2018-08-01 14:20:34 +00:00
|
|
|
error = xfs_bmap_unmap_extent(tp, tip, &uirec);
|
2016-10-03 16:11:53 +00:00
|
|
|
if (error)
|
2018-09-29 03:41:58 +00:00
|
|
|
goto out;
|
2016-10-03 16:11:53 +00:00
|
|
|
|
|
|
|
/* Remove the mapping from the source file. */
|
2018-08-01 14:20:34 +00:00
|
|
|
error = xfs_bmap_unmap_extent(tp, ip, &irec);
|
2016-10-03 16:11:53 +00:00
|
|
|
if (error)
|
2018-09-29 03:41:58 +00:00
|
|
|
goto out;
|
2016-10-03 16:11:53 +00:00
|
|
|
|
|
|
|
/* Map the donor file's blocks into the source file. */
|
2018-08-01 14:20:34 +00:00
|
|
|
error = xfs_bmap_map_extent(tp, ip, &uirec);
|
2016-10-03 16:11:53 +00:00
|
|
|
if (error)
|
2018-09-29 03:41:58 +00:00
|
|
|
goto out;
|
2016-10-03 16:11:53 +00:00
|
|
|
|
|
|
|
/* Map the source file's blocks into the donor file. */
|
2018-08-01 14:20:34 +00:00
|
|
|
error = xfs_bmap_map_extent(tp, tip, &irec);
|
2016-10-03 16:11:53 +00:00
|
|
|
if (error)
|
2018-09-29 03:41:58 +00:00
|
|
|
goto out;
|
2016-10-03 16:11:53 +00:00
|
|
|
|
2018-07-24 20:43:15 +00:00
|
|
|
error = xfs_defer_finish(tpp);
|
2018-07-12 05:26:17 +00:00
|
|
|
tp = *tpp;
|
2016-10-03 16:11:53 +00:00
|
|
|
if (error)
|
2018-08-01 14:20:33 +00:00
|
|
|
goto out;
|
2016-10-03 16:11:53 +00:00
|
|
|
|
|
|
|
tirec.br_startoff += rlen;
|
|
|
|
if (tirec.br_startblock != HOLESTARTBLOCK &&
|
|
|
|
tirec.br_startblock != DELAYSTARTBLOCK)
|
|
|
|
tirec.br_startblock += rlen;
|
|
|
|
tirec.br_blockcount -= rlen;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Roll on... */
|
|
|
|
count_fsb -= ilen;
|
|
|
|
offset_fsb += ilen;
|
|
|
|
}
|
|
|
|
|
|
|
|
tip->i_d.di_flags2 = tip_flags2;
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
out:
|
|
|
|
trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
|
|
|
|
tip->i_d.di_flags2 = tip_flags2;
|
|
|
|
return error;
|
|
|
|
}
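/*
 * Summary of the remap loop above (descriptive only): with rmap
 * enabled the "swap" is really four deferred bmap operations per
 * trimmed mapping - unmap the extent from the donor, unmap the
 * overlapping extent from the source, map the donor's old blocks into
 * the source, and map the source's old blocks into the donor - with
 * xfs_defer_finish() run after each mapping so the intents hit the
 * log before the next piece is processed.
 */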
|
|
|
|
|
2016-10-03 16:11:53 +00:00
|
|
|
/* Swap the extents of two files by swapping data forks. */
|
|
|
|
STATIC int
|
|
|
|
xfs_swap_extent_forks(
|
|
|
|
struct xfs_trans *tp,
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
struct xfs_inode *tip,
|
|
|
|
int *src_log_flags,
|
|
|
|
int *target_log_flags)
|
2013-08-12 10:49:48 +00:00
|
|
|
{
|
2017-06-16 18:00:12 +00:00
|
|
|
xfs_filblks_t aforkblks = 0;
|
|
|
|
xfs_filblks_t taforkblks = 0;
|
|
|
|
xfs_extnum_t junk;
|
2017-06-16 18:00:05 +00:00
|
|
|
uint64_t tmp;
|
2016-10-03 16:11:53 +00:00
|
|
|
int error;
|
2013-08-12 10:49:48 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Count the number of extended attribute blocks
|
|
|
|
*/
|
|
|
|
if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
|
|
|
|
(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
|
2017-06-16 18:00:12 +00:00
|
|
|
error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
|
2016-10-03 16:11:53 +00:00
|
|
|
&aforkblks);
|
2013-08-12 10:49:48 +00:00
|
|
|
if (error)
|
2016-10-03 16:11:53 +00:00
|
|
|
return error;
|
2013-08-12 10:49:48 +00:00
|
|
|
}
|
|
|
|
if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
|
|
|
|
(tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
|
2017-06-16 18:00:12 +00:00
|
|
|
error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
|
2016-10-03 16:11:53 +00:00
|
|
|
&taforkblks);
|
2013-08-12 10:49:48 +00:00
|
|
|
if (error)
|
2016-10-03 16:11:53 +00:00
|
|
|
return error;
|
2013-08-12 10:49:48 +00:00
|
|
|
}
|
|
|
|
|
2013-08-30 00:23:44 +00:00
|
|
|
/*
|
2017-08-29 17:08:39 +00:00
|
|
|
* Btree format (v3) inodes have the inode number stamped in the bmbt
|
|
|
|
* block headers. We can't start changing the bmbt blocks until the
|
|
|
|
* inode owner change is logged so recovery does the right thing in the
|
|
|
|
* event of a crash. Set the owner change log flags now and leave the
|
|
|
|
* bmbt scan as the last step.
|
2013-08-30 00:23:44 +00:00
|
|
|
*/
|
|
|
|
if (ip->i_d.di_version == 3 &&
|
2017-08-29 17:08:39 +00:00
|
|
|
ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
|
2016-10-03 16:11:53 +00:00
|
|
|
(*target_log_flags) |= XFS_ILOG_DOWNER;
|
2013-08-30 00:23:44 +00:00
|
|
|
if (tip->i_d.di_version == 3 &&
|
2017-08-29 17:08:39 +00:00
|
|
|
tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
|
2016-10-03 16:11:53 +00:00
|
|
|
(*src_log_flags) |= XFS_ILOG_DOWNER;
|
2013-08-30 00:23:44 +00:00
|
|
|
|
2013-08-12 10:49:48 +00:00
|
|
|
/*
|
|
|
|
* Swap the data forks of the inodes
|
|
|
|
*/
|
2018-07-12 05:26:38 +00:00
|
|
|
swap(ip->i_df, tip->i_df);
|
2013-08-12 10:49:48 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Fix the on-disk inode values
|
|
|
|
*/
|
2017-06-16 18:00:05 +00:00
|
|
|
tmp = (uint64_t)ip->i_d.di_nblocks;
|
2013-08-12 10:49:48 +00:00
|
|
|
ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
|
|
|
|
tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
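/*
 * Note on the arithmetic above: only the data forks swap owners, so
 * each inode keeps its own attribute fork blocks. Each inode takes
 * the other's total block count, subtracts the other's attr fork
 * blocks and adds back its own.
 */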
|
|
|
|
|
2018-07-12 05:26:38 +00:00
|
|
|
swap(ip->i_d.di_nextents, tip->i_d.di_nextents);
|
|
|
|
swap(ip->i_d.di_format, tip->i_d.di_format);
|
2013-08-12 10:49:48 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The extents in the source inode could still contain speculative
|
|
|
|
* preallocation beyond EOF (e.g. the file is open but not modified
|
|
|
|
* while defrag is in progress). In that case, we need to copy over the
|
|
|
|
* number of delalloc blocks the data fork in the source inode is
|
|
|
|
* tracking beyond EOF so that when the fork is truncated away when the
|
|
|
|
* temporary inode is unlinked we don't underrun the i_delayed_blks
|
|
|
|
* counter on that inode.
|
|
|
|
*/
|
|
|
|
ASSERT(tip->i_delayed_blks == 0);
|
|
|
|
tip->i_delayed_blks = ip->i_delayed_blks;
|
|
|
|
ip->i_delayed_blks = 0;
|
|
|
|
|
|
|
|
switch (ip->i_d.di_format) {
|
|
|
|
case XFS_DINODE_FMT_EXTENTS:
|
2016-10-03 16:11:53 +00:00
|
|
|
(*src_log_flags) |= XFS_ILOG_DEXT;
|
2013-08-12 10:49:48 +00:00
|
|
|
break;
|
|
|
|
case XFS_DINODE_FMT_BTREE:
|
2013-08-30 00:23:44 +00:00
|
|
|
ASSERT(ip->i_d.di_version < 3 ||
|
2016-10-03 16:11:53 +00:00
|
|
|
(*src_log_flags & XFS_ILOG_DOWNER));
|
|
|
|
(*src_log_flags) |= XFS_ILOG_DBROOT;
|
2013-08-12 10:49:48 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (tip->i_d.di_format) {
|
|
|
|
case XFS_DINODE_FMT_EXTENTS:
|
2016-10-03 16:11:53 +00:00
|
|
|
(*target_log_flags) |= XFS_ILOG_DEXT;
|
2013-08-12 10:49:48 +00:00
|
|
|
break;
|
|
|
|
case XFS_DINODE_FMT_BTREE:
|
2016-10-03 16:11:53 +00:00
|
|
|
(*target_log_flags) |= XFS_ILOG_DBROOT;
|
2013-08-30 00:23:44 +00:00
|
|
|
ASSERT(tip->i_d.di_version < 3 ||
|
2016-10-03 16:11:53 +00:00
|
|
|
(*target_log_flags & XFS_ILOG_DOWNER));
|
2013-08-12 10:49:48 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2016-10-03 16:11:53 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-08-29 17:08:40 +00:00
|
|
|
/*
|
|
|
|
* Fix up the owners of the bmbt blocks to refer to the current inode. The
|
|
|
|
* change owner scan attempts to order all modified buffers in the current
|
|
|
|
* transaction. In the event of ordered buffer failure, the offending buffer is
|
|
|
|
* physically logged as a fallback and the scan returns -EAGAIN. We must roll
|
|
|
|
* the transaction in this case to replenish the fallback log reservation and
|
|
|
|
* restart the scan. This process repeats until the scan completes.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
xfs_swap_change_owner(
|
|
|
|
struct xfs_trans **tpp,
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
struct xfs_inode *tmpip)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct xfs_trans *tp = *tpp;
|
|
|
|
|
|
|
|
do {
|
|
|
|
error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
|
|
|
|
NULL);
|
|
|
|
/* success or fatal error */
|
|
|
|
if (error != -EAGAIN)
|
|
|
|
break;
|
|
|
|
|
|
|
|
error = xfs_trans_roll(tpp);
|
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
tp = *tpp;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Redirty both inodes so they can relog and keep the log tail
|
|
|
|
* moving forward.
|
|
|
|
*/
|
|
|
|
xfs_trans_ijoin(tp, ip, 0);
|
|
|
|
xfs_trans_ijoin(tp, tmpip, 0);
|
|
|
|
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
|
|
|
xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
|
|
|
|
} while (true);
|
|
|
|
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2016-10-03 16:11:53 +00:00
|
|
|
int
|
|
|
|
xfs_swap_extents(
|
|
|
|
struct xfs_inode *ip, /* target inode */
|
|
|
|
struct xfs_inode *tip, /* tmp inode */
|
|
|
|
struct xfs_swapext *sxp)
|
|
|
|
{
|
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
|
|
|
struct xfs_trans *tp;
|
|
|
|
struct xfs_bstat *sbp = &sxp->sx_stat;
|
|
|
|
int src_log_flags, target_log_flags;
|
|
|
|
int error = 0;
|
|
|
|
int lock_flags;
|
2017-06-16 18:00:05 +00:00
|
|
|
uint64_t f;
|
2017-08-29 17:08:40 +00:00
|
|
|
int resblks = 0;
|
2016-10-03 16:11:53 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Lock the inodes against other IO, page faults and truncate to
|
|
|
|
* begin with. Then we can ensure the inodes are flushed and have no
|
|
|
|
* page cache safely. Once we have done this we can take the ilocks and
|
|
|
|
* do the rest of the checks.
|
|
|
|
*/
|
2016-11-30 03:33:25 +00:00
|
|
|
lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
|
|
|
|
lock_flags = XFS_MMAPLOCK_EXCL;
|
2018-01-26 23:27:33 +00:00
|
|
|
xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);
|
2016-10-03 16:11:53 +00:00
|
|
|
|
|
|
|
/* Verify that both files have the same format */
|
|
|
|
if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
|
|
|
|
error = -EINVAL;
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Verify both files are either real-time or non-realtime */
|
|
|
|
if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
|
|
|
|
error = -EINVAL;
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
error = xfs_swap_extent_flush(ip);
|
|
|
|
if (error)
|
|
|
|
goto out_unlock;
|
|
|
|
error = xfs_swap_extent_flush(tip);
|
|
|
|
if (error)
|
|
|
|
goto out_unlock;
|
|
|
|
|
2018-10-18 06:21:55 +00:00
|
|
|
if (xfs_inode_has_cow_data(tip)) {
|
|
|
|
error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2016-10-03 16:11:53 +00:00
|
|
|
/*
|
|
|
|
* Extent "swapping" with rmap requires a permanent reservation and
|
|
|
|
* a block reservation because it's really just a remap operation
|
|
|
|
* performed with log redo items!
|
|
|
|
*/
|
|
|
|
if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
|
2018-03-09 22:01:58 +00:00
|
|
|
int w = XFS_DATA_FORK;
|
|
|
|
uint32_t ipnext = XFS_IFORK_NEXTENTS(ip, w);
|
|
|
|
uint32_t tipnext = XFS_IFORK_NEXTENTS(tip, w);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Conceptually this shouldn't affect the shape of either bmbt,
|
|
|
|
* but since we atomically move extents one by one, we reserve
|
|
|
|
* enough space to rebuild both trees.
|
|
|
|
*/
|
|
|
|
resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
|
|
|
|
resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);
|
|
|
|
|
2016-10-03 16:11:53 +00:00
|
|
|
/*
|
2018-03-09 22:01:58 +00:00
|
|
|
* Handle the corner case where either inode might straddle the
|
|
|
|
* btree format boundary. If so, the inode could bounce between
|
|
|
|
* btree <-> extent format on unmap -> remap cycles, freeing and
|
|
|
|
* allocating a bmapbt block each time.
|
2016-10-03 16:11:53 +00:00
|
|
|
*/
|
2018-03-09 22:01:58 +00:00
|
|
|
if (ipnext == (XFS_IFORK_MAXEXT(ip, w) + 1))
|
|
|
|
resblks += XFS_IFORK_MAXEXT(ip, w);
|
|
|
|
if (tipnext == (XFS_IFORK_MAXEXT(tip, w) + 1))
|
|
|
|
resblks += XFS_IFORK_MAXEXT(tip, w);
|
2017-08-29 17:08:40 +00:00
|
|
|
}
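/*
 * Illustrative case for the boundary check above (descriptive only):
 * if the inline data fork holds at most N extent records and an inode
 * currently holds exactly N + 1, it sits right on the btree/extent
 * format boundary. Each unmap during the swap can drop it back to
 * extent format and free its lone bmbt block, and each remap can push
 * it back to btree format and allocate one again, so extra blocks are
 * reserved up front to absorb those repeated allocations.
 */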
|
|
|
|
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
|
2016-10-03 16:11:53 +00:00
|
|
|
if (error)
|
|
|
|
goto out_unlock;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Lock and join the inodes to the transaction so that transaction commit
|
|
|
|
* or cancel will unlock the inodes from this point onwards.
|
|
|
|
*/
|
2018-01-26 23:27:33 +00:00
|
|
|
xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
|
2016-10-03 16:11:53 +00:00
|
|
|
lock_flags |= XFS_ILOCK_EXCL;
|
|
|
|
xfs_trans_ijoin(tp, ip, 0);
|
|
|
|
xfs_trans_ijoin(tp, tip, 0);
|
|
|
|
|
|
|
|
|
|
|
|
/* Verify all data are being swapped */
|
|
|
|
if (sxp->sx_offset != 0 ||
|
|
|
|
sxp->sx_length != ip->i_d.di_size ||
|
|
|
|
sxp->sx_length != tip->i_d.di_size) {
|
|
|
|
error = -EFAULT;
|
|
|
|
goto out_trans_cancel;
|
|
|
|
}
|
|
|
|
|
|
|
|
trace_xfs_swap_extent_before(ip, 0);
|
|
|
|
trace_xfs_swap_extent_before(tip, 1);
|
|
|
|
|
|
|
|
/* check inode formats now that data is flushed */
|
|
|
|
error = xfs_swap_extents_check_format(ip, tip);
|
|
|
|
if (error) {
|
|
|
|
xfs_notice(mp,
|
|
|
|
"%s: inode 0x%llx format is incompatible for exchanging.",
|
|
|
|
__func__, ip->i_ino);
|
|
|
|
goto out_trans_cancel;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Compare the current change & modify times with that
|
|
|
|
* passed in. If they differ, we abort this swap.
|
|
|
|
* This is the mechanism used to assure the calling
|
|
|
|
* process that the file was not changed out from
|
|
|
|
* under it.
|
|
|
|
*/
|
|
|
|
if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
|
|
|
|
(sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
|
|
|
|
(sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
|
|
|
|
(sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
|
|
|
|
error = -EBUSY;
|
|
|
|
goto out_trans_cancel;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Note the trickiness in setting the log flags - we set the owner log
|
|
|
|
* flag on the opposite inode (i.e. the inode we are setting the new
|
|
|
|
* owner to be) because once we swap the forks and log that, log
|
|
|
|
* recovery is going to see the fork as owned by the swapped inode,
|
|
|
|
* not the pre-swapped inodes.
|
|
|
|
*/
|
|
|
|
src_log_flags = XFS_ILOG_CORE;
|
|
|
|
target_log_flags = XFS_ILOG_CORE;
|
|
|
|
|
2016-10-03 16:11:53 +00:00
|
|
|
if (xfs_sb_version_hasrmapbt(&mp->m_sb))
|
|
|
|
error = xfs_swap_extent_rmap(&tp, ip, tip);
|
|
|
|
else
|
|
|
|
error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
|
|
|
|
&target_log_flags);
|
2016-10-03 16:11:53 +00:00
|
|
|
if (error)
|
|
|
|
goto out_trans_cancel;
|
|
|
|
|
2016-10-03 16:11:42 +00:00
|
|
|
/* Do we have to swap reflink flags? */
|
|
|
|
if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
|
|
|
|
(tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
|
|
|
|
f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
|
|
|
|
ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
|
|
|
|
ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
|
|
|
|
tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
|
|
|
|
tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
|
2017-09-18 16:41:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Swap the cow forks. */
|
|
|
|
if (xfs_sb_version_hasreflink(&mp->m_sb)) {
|
|
|
|
ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
|
|
|
|
ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);
|
|
|
|
|
2018-07-12 05:26:38 +00:00
|
|
|
swap(ip->i_cnextents, tip->i_cnextents);
|
|
|
|
swap(ip->i_cowfp, tip->i_cowfp);
|
2017-09-18 16:41:18 +00:00
|
|
|
|
2018-03-14 06:15:30 +00:00
|
|
|
if (ip->i_cowfp && ip->i_cowfp->if_bytes)
|
2017-09-18 16:41:18 +00:00
|
|
|
xfs_inode_set_cowblocks_tag(ip);
|
|
|
|
else
|
|
|
|
xfs_inode_clear_cowblocks_tag(ip);
|
2018-03-14 06:15:30 +00:00
|
|
|
if (tip->i_cowfp && tip->i_cowfp->if_bytes)
|
2017-09-18 16:41:18 +00:00
|
|
|
xfs_inode_set_cowblocks_tag(tip);
|
|
|
|
else
|
|
|
|
xfs_inode_clear_cowblocks_tag(tip);
|
2016-10-03 16:11:42 +00:00
|
|
|
}
|
|
|
|
|
2013-08-12 10:49:48 +00:00
|
|
|
xfs_trans_log_inode(tp, ip, src_log_flags);
|
|
|
|
xfs_trans_log_inode(tp, tip, target_log_flags);
|
|
|
|
|
2017-08-29 17:08:39 +00:00
|
|
|
/*
|
|
|
|
* The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
|
|
|
|
* have inode number owner values in the bmbt blocks that still refer to
|
|
|
|
* the old inode. Scan each bmbt to fix up the owner values with the
|
|
|
|
* inode number of the current inode.
|
|
|
|
*/
|
|
|
|
if (src_log_flags & XFS_ILOG_DOWNER) {
|
2017-08-29 17:08:40 +00:00
|
|
|
error = xfs_swap_change_owner(&tp, ip, tip);
|
2017-08-29 17:08:39 +00:00
|
|
|
if (error)
|
|
|
|
goto out_trans_cancel;
|
|
|
|
}
|
|
|
|
if (target_log_flags & XFS_ILOG_DOWNER) {
|
2017-08-29 17:08:40 +00:00
|
|
|
error = xfs_swap_change_owner(&tp, tip, ip);
|
2017-08-29 17:08:39 +00:00
|
|
|
if (error)
|
|
|
|
goto out_trans_cancel;
|
|
|
|
}
|
|
|
|
|
2013-08-12 10:49:48 +00:00
|
|
|
/*
|
|
|
|
* If this is a synchronous mount, make sure that the
|
|
|
|
* transaction goes to disk before returning to the user.
|
|
|
|
*/
|
|
|
|
if (mp->m_flags & XFS_MOUNT_WSYNC)
|
|
|
|
xfs_trans_set_sync(tp);
|
|
|
|
|
2015-06-04 03:48:08 +00:00
|
|
|
error = xfs_trans_commit(tp);
|
2013-08-12 10:49:48 +00:00
|
|
|
|
|
|
|
trace_xfs_swap_extent_after(ip, 0);
|
|
|
|
trace_xfs_swap_extent_after(tip, 1);
|
|
|
|
|
2016-11-30 03:33:25 +00:00
|
|
|
out_unlock:
|
2014-08-04 03:29:32 +00:00
|
|
|
xfs_iunlock(ip, lock_flags);
|
|
|
|
xfs_iunlock(tip, lock_flags);
|
2016-11-30 03:33:25 +00:00
|
|
|
unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
|
2016-10-03 16:11:53 +00:00
|
|
|
return error;
|
2013-08-12 10:49:48 +00:00
|
|
|
|
|
|
|
out_trans_cancel:
|
2015-06-04 03:47:56 +00:00
|
|
|
xfs_trans_cancel(tp);
|
2016-11-30 03:33:25 +00:00
|
|
|
goto out_unlock;
|
2013-08-12 10:49:48 +00:00
|
|
|
}
|