Changes since last update:
- Fix various iomap bugs
- Fix overly aggressive CoW preallocation garbage collection
- Fixes to CoW endio error handling
- Fix some incorrect geometry calculations
- Remove a potential system hang in bulkstat
- Try to allocate blocks more aggressively to reduce ENOSPC errors

Merge tag 'xfs-4.11-fixes-1' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull xfs fixes from Darrick Wong:
 "Here are some bug fixes for -rc2 to clean up the copy on write
  handling and to remove a cause of hangs.

  - Fix various iomap bugs
  - Fix overly aggressive CoW preallocation garbage collection
  - Fixes to CoW endio error handling
  - Fix some incorrect geometry calculations
  - Remove a potential system hang in bulkstat
  - Try to allocate blocks more aggressively to reduce ENOSPC errors"

* tag 'xfs-4.11-fixes-1' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
  xfs: try any AG when allocating the first btree block when reflinking
  xfs: use iomap new flag for newly allocated delalloc blocks
  xfs: remove kmem_zalloc_greedy
  xfs: Use xfs_icluster_size_fsb() to calculate inode alignment mask
  xfs: fix and streamline error handling in xfs_end_io
  xfs: only reclaim unwritten COW extents periodically
  iomap: invalidate page caches should be after iomap_dio_complete() in direct write
commit 9db61d6fd6

fs/iomap.c: 17 lines changed
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -846,7 +846,8 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 	struct address_space *mapping = iocb->ki_filp->f_mapping;
 	struct inode *inode = file_inode(iocb->ki_filp);
 	size_t count = iov_iter_count(iter);
-	loff_t pos = iocb->ki_pos, end = iocb->ki_pos + count - 1, ret = 0;
+	loff_t pos = iocb->ki_pos, start = pos;
+	loff_t end = iocb->ki_pos + count - 1, ret = 0;
 	unsigned int flags = IOMAP_DIRECT;
 	struct blk_plug plug;
 	struct iomap_dio *dio;
@@ -887,12 +888,12 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 	}

 	if (mapping->nrpages) {
-		ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
+		ret = filemap_write_and_wait_range(mapping, start, end);
 		if (ret)
 			goto out_free_dio;

 		ret = invalidate_inode_pages2_range(mapping,
-				iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
+				start >> PAGE_SHIFT, end >> PAGE_SHIFT);
 		WARN_ON_ONCE(ret);
 		ret = 0;
 	}
@@ -941,6 +942,8 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 		__set_current_state(TASK_RUNNING);
 	}

+	ret = iomap_dio_complete(dio);
+
 	/*
 	 * Try again to invalidate clean pages which might have been cached by
 	 * non-direct readahead, or faulted in by get_user_pages() if the source
@@ -949,12 +952,12 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 	 * this invalidation fails, tough, the write still worked...
 	 */
 	if (iov_iter_rw(iter) == WRITE && mapping->nrpages) {
-		ret = invalidate_inode_pages2_range(mapping,
-				iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
-		WARN_ON_ONCE(ret);
+		int err = invalidate_inode_pages2_range(mapping,
+				start >> PAGE_SHIFT, end >> PAGE_SHIFT);
+		WARN_ON_ONCE(err);
 	}

-	return iomap_dio_complete(dio);
+	return ret;

 out_free_dio:
 	kfree(dio);
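The change above runs the second page-cache invalidation only after iomap_dio_complete() has finished, so pages read back in while the direct write was still in flight cannot linger as stale cache. The userspace-visible contract this protects is sketched below: a buffered read issued after an O_DIRECT write completes must see the new data. This is an illustrative model only, not a guaranteed reproducer of the original race; the file path is a placeholder and the program assumes a filesystem and block device that accept 4096-byte O_DIRECT I/O.

/* Illustrative coherence check: buffered read after O_DIRECT write.
 * Assumes a 4096-byte-aligned O_DIRECT capable filesystem; the path
 * is hypothetical. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/mnt/test/dio-coherence";	/* placeholder */
	char check[4096];
	char *buf;
	int fd, fd2;

	if (posix_memalign((void **)&buf, 4096, 4096))
		return 1;
	memset(buf, 'A', 4096);

	fd = open(path, O_CREAT | O_RDWR | O_DIRECT, 0644);
	if (fd < 0) { perror("open"); return 1; }

	/* The direct write bypasses the page cache entirely... */
	if (pwrite(fd, buf, 4096, 0) != 4096) { perror("pwrite"); return 1; }

	/* ...so by the time it returns, any cached pages covering this
	 * range must have been invalidated, or this buffered read could
	 * observe stale data. */
	fd2 = open(path, O_RDONLY);
	if (fd2 < 0 || pread(fd2, check, 4096, 0) != 4096) {
		perror("pread");
		return 1;
	}
	printf("first byte after DIO write: %c\n", check[0]);

	close(fd2);
	close(fd);
	free(buf);
	return 0;
}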
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -25,24 +25,6 @@
 #include "kmem.h"
 #include "xfs_message.h"

-/*
- * Greedy allocation.  May fail and may return vmalloced memory.
- */
-void *
-kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
-{
-	void		*ptr;
-	size_t		kmsize = maxsize;
-
-	while (!(ptr = vzalloc(kmsize))) {
-		if ((kmsize >>= 1) <= minsize)
-			kmsize = minsize;
-	}
-	if (ptr)
-		*size = kmsize;
-	return ptr;
-}
-
 void *
 kmem_alloc(size_t size, xfs_km_flags_t flags)
 {
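The removed helper retried vzalloc() in an unbounded loop: once the request size had been halved down to minsize, it simply retried minsize forever, so a caller could wedge indefinitely under memory pressure. The bulkstat caller (below) now makes a single fixed-size allocation and returns ENOMEM instead. A minimal userspace model of the two strategies, with calloc() standing in for vzalloc():

/* Model of the removed retry loop vs. the fail-fast replacement.
 * calloc() stands in for vzalloc(); the unbounded retry is the point. */
#include <stdio.h>
#include <stdlib.h>

/* Old behaviour: never returns NULL. If the allocator keeps failing
 * after the size has shrunk to minsize, this spins forever. */
static void *zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
{
	size_t ksize = maxsize;
	void *ptr;

	while (!(ptr = calloc(1, ksize))) {
		if ((ksize >>= 1) <= minsize)
			ksize = minsize;	/* retry minsize indefinitely */
	}
	*size = ksize;
	return ptr;
}

/* New behaviour: one attempt, and the caller handles the failure. */
static void *zalloc_large(size_t size)
{
	return calloc(1, size);
}

int main(void)
{
	size_t got;
	void *buf = zalloc_greedy(&got, 4096, 4 * 4096);

	printf("greedy gave %zu bytes\n", got);
	free(buf);

	buf = zalloc_large(4 * 4096);
	if (!buf) {
		fprintf(stderr, "ENOMEM, caller can back off\n");
		return 1;
	}
	free(buf);
	return 0;
}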
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -69,8 +69,6 @@ static inline void kmem_free(const void *ptr)
 }


-extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);
-
 static inline void *
 kmem_zalloc(size_t size, xfs_km_flags_t flags)
 {
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -763,8 +763,8 @@ xfs_bmap_extents_to_btree(
 		args.type = XFS_ALLOCTYPE_START_BNO;
 		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
 	} else if (dfops->dop_low) {
-try_another_ag:
 		args.type = XFS_ALLOCTYPE_START_BNO;
+try_another_ag:
 		args.fsbno = *firstblock;
 	} else {
 		args.type = XFS_ALLOCTYPE_NEAR_BNO;
@@ -790,13 +790,17 @@ try_another_ag:
 	if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
 	    args.fsbno == NULLFSBLOCK &&
 	    args.type == XFS_ALLOCTYPE_NEAR_BNO) {
-		dfops->dop_low = true;
+		args.type = XFS_ALLOCTYPE_FIRST_AG;
 		goto try_another_ag;
 	}
+	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
+		xfs_iroot_realloc(ip, -1, whichfork);
+		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+		return -ENOSPC;
+	}
 	/*
 	 * Allocation can't fail, the space was reserved.
 	 */
-	ASSERT(args.fsbno != NULLFSBLOCK);
 	ASSERT(*firstblock == NULLFSBLOCK ||
 	       args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock));
 	*firstblock = cur->bc_private.b.firstblock = args.fsbno;
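On a reflink filesystem the first btree block was previously pinned to the allocation group of the hint (XFS_ALLOCTYPE_NEAR_BNO); when that AG was full the code asserted. The hunks above widen the retry to XFS_ALLOCTYPE_FIRST_AG, which scans every AG before giving up cleanly with ENOSPC. A simplified userspace model of the retry ladder, with hypothetical stand-in names for the allocator:

/* Simplified model of the retry ladder: try near the hinted block
 * first, then any AG, then fail with ENOSPC. Names are illustrative. */
#include <errno.h>
#include <stdio.h>

enum alloc_type { ALLOC_NEAR_BNO, ALLOC_FIRST_AG };
#define NULLBLOCK	(-1LL)

/* Stand-in allocator: pretend the hinted AG is exhausted, but a later
 * AG still has space once we are allowed to scan all of them. */
static long long alloc_extent(enum alloc_type type, long long hint)
{
	if (type == ALLOC_NEAR_BNO)
		return NULLBLOCK;	/* hinted AG is full */
	return hint + 1000;		/* found space elsewhere */
}

static int alloc_btree_block(long long hint, long long *out)
{
	enum alloc_type type = ALLOC_NEAR_BNO;
	long long fsbno;

try_another_ag:
	fsbno = alloc_extent(type, hint);
	if (fsbno == NULLBLOCK && type == ALLOC_NEAR_BNO) {
		type = ALLOC_FIRST_AG;	/* widen the search */
		goto try_another_ag;
	}
	if (fsbno == NULLBLOCK)
		return -ENOSPC;		/* clean failure, no assert */
	*out = fsbno;
	return 0;
}

int main(void)
{
	long long blk;
	int err = alloc_btree_block(5000, &blk);

	printf("err=%d blk=%lld\n", err, blk);
	return 0;
}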
@@ -4150,6 +4154,19 @@ xfs_bmapi_read(
 	return 0;
 }

+/*
+ * Add a delayed allocation extent to an inode. Blocks are reserved from the
+ * global pool and the extent inserted into the inode in-core extent tree.
+ *
+ * On entry, got refers to the first extent beyond the offset of the extent to
+ * allocate or eof is specified if no such extent exists. On return, got refers
+ * to the extent record that was inserted to the inode fork.
+ *
+ * Note that the allocated extent may have been merged with contiguous extents
+ * during insertion into the inode fork. Thus, got does not reflect the current
+ * state of the inode fork on return. If necessary, the caller can use lastx to
+ * look up the updated record in the inode fork.
+ */
 int
 xfs_bmapi_reserve_delalloc(
 	struct xfs_inode	*ip,
@@ -4236,13 +4253,8 @@ xfs_bmapi_reserve_delalloc(
 	got->br_startblock = nullstartblock(indlen);
 	got->br_blockcount = alen;
 	got->br_state = XFS_EXT_NORM;
-	xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);

-	/*
-	 * Update our extent pointer, given that xfs_bmap_add_extent_hole_delay
-	 * might have merged it into one of the neighbouring ones.
-	 */
-	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got);
+	xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);

 	/*
 	 * Tag the inode if blocks were preallocated. Note that COW fork
@@ -4254,10 +4266,6 @@ xfs_bmapi_reserve_delalloc(
 	if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
 		xfs_inode_set_cowblocks_tag(ip);

-	ASSERT(got->br_startoff <= aoff);
-	ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen);
-	ASSERT(isnullstartblock(got->br_startblock));
-	ASSERT(got->br_state == XFS_EXT_NORM);
 	return 0;

 out_unreserve_blocks:
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -447,8 +447,8 @@ xfs_bmbt_alloc_block(

 	if (args.fsbno == NULLFSBLOCK) {
 		args.fsbno = be64_to_cpu(start->l);
-try_another_ag:
 		args.type = XFS_ALLOCTYPE_START_BNO;
+try_another_ag:
 		/*
 		 * Make sure there is sufficient room left in the AG to
 		 * complete a full tree split for an extent insert.  If
@@ -488,8 +488,8 @@ try_another_ag:
 	if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
 	    args.fsbno == NULLFSBLOCK &&
 	    args.type == XFS_ALLOCTYPE_NEAR_BNO) {
-		cur->bc_private.b.dfops->dop_low = true;
 		args.fsbno = cur->bc_private.b.firstblock;
+		args.type = XFS_ALLOCTYPE_FIRST_AG;
 		goto try_another_ag;
 	}

@@ -506,7 +506,7 @@ try_another_ag:
 			goto error0;
 		cur->bc_private.b.dfops->dop_low = true;
 	}
-	if (args.fsbno == NULLFSBLOCK) {
+	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
 		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
 		*stat = 0;
 		return 0;
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -274,54 +274,49 @@ xfs_end_io(
 	struct xfs_ioend	*ioend =
 		container_of(work, struct xfs_ioend, io_work);
 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
+	xfs_off_t		offset = ioend->io_offset;
+	size_t			size = ioend->io_size;
 	int			error = ioend->io_bio->bi_error;

 	/*
-	 * Set an error if the mount has shut down and proceed with end I/O
-	 * processing so it can perform whatever cleanups are necessary.
+	 * Just clean up the in-memory strutures if the fs has been shut down.
 	 */
-	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
 		error = -EIO;
+		goto done;
+	}

 	/*
-	 * For a CoW extent, we need to move the mapping from the CoW fork
-	 * to the data fork.  If instead an error happened, just dump the
-	 * new blocks.
+	 * Clean up any COW blocks on an I/O error.
 	 */
-	if (ioend->io_type == XFS_IO_COW) {
-		if (error)
-			goto done;
-		if (ioend->io_bio->bi_error) {
-			error = xfs_reflink_cancel_cow_range(ip,
-					ioend->io_offset, ioend->io_size);
-			goto done;
-		}
-		error = xfs_reflink_end_cow(ip, ioend->io_offset,
-				ioend->io_size);
-		if (error)
-			goto done;
+	if (unlikely(error)) {
+		switch (ioend->io_type) {
+		case XFS_IO_COW:
+			xfs_reflink_cancel_cow_range(ip, offset, size, true);
+			break;
+		}
+
+		goto done;
 	}

 	/*
-	 * For unwritten extents we need to issue transactions to convert a
-	 * range to normal written extens after the data I/O has finished.
-	 * Detecting and handling completion IO errors is done individually
-	 * for each case as different cleanup operations need to be performed
-	 * on error.
+	 * Success: commit the COW or unwritten blocks if needed.
 	 */
-	if (ioend->io_type == XFS_IO_UNWRITTEN) {
-		if (error)
-			goto done;
-		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
-						  ioend->io_size);
-	} else if (ioend->io_append_trans) {
-		error = xfs_setfilesize_ioend(ioend, error);
-	} else {
-		ASSERT(!xfs_ioend_is_append(ioend) ||
-		       ioend->io_type == XFS_IO_COW);
+	switch (ioend->io_type) {
+	case XFS_IO_COW:
+		error = xfs_reflink_end_cow(ip, offset, size);
+		break;
+	case XFS_IO_UNWRITTEN:
+		error = xfs_iomap_write_unwritten(ip, offset, size);
+		break;
+	default:
+		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
+		break;
 	}

 done:
+	if (ioend->io_append_trans)
+		error = xfs_setfilesize_ioend(ioend, error);
 	xfs_destroy_ioend(ioend, error);
 }
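The rewrite above separates the error path (COW reservations are the only thing needing cleanup) from the success path (commit COW remapping or unwritten-extent conversion), and moves the on-disk file size update to the shared done: label so it runs for every ioend that queued one. A condensed userspace model of the new control flow, with illustrative names and printf standing in for the real operations:

/* Condensed model of the reworked I/O completion flow: error handling
 * is split from the success commit, and the file size update runs on
 * both paths. Illustrative only. */
#include <stdio.h>

enum io_type { IO_OVERWRITE, IO_UNWRITTEN, IO_COW };

static int end_io(enum io_type type, int error, int has_append_trans)
{
	if (error) {
		/* Error: only COW has staged blocks to throw away. */
		if (type == IO_COW)
			printf("cancel COW range\n");
		goto done;
	}

	/* Success: commit staged COW blocks or convert unwritten extents. */
	switch (type) {
	case IO_COW:
		printf("move COW mapping to data fork\n");
		break;
	case IO_UNWRITTEN:
		printf("convert unwritten extents\n");
		break;
	default:
		break;
	}

done:
	/* The size update happens for every ioend that queued one,
	 * whether or not the I/O succeeded. */
	if (has_append_trans)
		printf("update on-disk file size\n");
	return error;
}

int main(void)
{
	end_io(IO_COW, 0, 1);	/* success path */
	end_io(IO_COW, -5, 0);	/* error path */
	return 0;
}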
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -1608,7 +1608,7 @@ xfs_inode_free_cowblocks(
 	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);

-	ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF);
+	ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);

 	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1615,7 +1615,7 @@ xfs_itruncate_extents(

 	/* Remove all pending CoW reservations. */
 	error = xfs_reflink_cancel_cow_blocks(ip, &tp, first_unmap_block,
-			last_block);
+			last_block, true);
 	if (error)
 		goto out;

--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -630,6 +630,11 @@ retry:
 		goto out_unlock;
 	}

+	/*
+	 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
+	 * them out if the write happens to fail.
+	 */
+	iomap->flags = IOMAP_F_NEW;
 	trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
 done:
 	if (isnullstartblock(got.br_startblock))
@@ -1071,16 +1076,22 @@ xfs_file_iomap_end_delalloc(
 	struct xfs_inode	*ip,
 	loff_t			offset,
 	loff_t			length,
-	ssize_t			written)
+	ssize_t			written,
+	struct iomap		*iomap)
 {
 	struct xfs_mount	*mp = ip->i_mount;
 	xfs_fileoff_t		start_fsb;
 	xfs_fileoff_t		end_fsb;
 	int			error = 0;

-	/* behave as if the write failed if drop writes is enabled */
-	if (xfs_mp_drop_writes(mp))
+	/*
+	 * Behave as if the write failed if drop writes is enabled. Set the NEW
+	 * flag to force delalloc cleanup.
+	 */
+	if (xfs_mp_drop_writes(mp)) {
+		iomap->flags |= IOMAP_F_NEW;
 		written = 0;
+	}

 	/*
 	 * start_fsb refers to the first unused block after a short write. If
@@ -1094,14 +1105,14 @@ xfs_file_iomap_end_delalloc(
 	end_fsb = XFS_B_TO_FSB(mp, offset + length);

 	/*
-	 * Trim back delalloc blocks if we didn't manage to write the whole
-	 * range reserved.
+	 * Trim delalloc blocks if they were allocated by this write and we
+	 * didn't manage to write the whole range.
 	 *
 	 * We don't need to care about racing delalloc as we hold i_mutex
 	 * across the reserve/allocate/unreserve calls. If there are delalloc
 	 * blocks in the range, they are ours.
 	 */
-	if (start_fsb < end_fsb) {
+	if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
 		truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
 					 XFS_FSB_TO_B(mp, end_fsb) - 1);

@@ -1131,7 +1142,7 @@ xfs_file_iomap_end(
 {
 	if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
 		return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
-				length, written);
+				length, written, iomap);
 	return 0;
 }

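The effect of the IOMAP_F_NEW change: after a short write, leftover delalloc blocks are punched out only if this write allocated them; without the flag check, a failed write could tear down pre-existing delalloc reservations that belong to earlier writes. A pure-function sketch of the decision, using an illustrative flag value and hypothetical helper name:

/* Sketch of the end-of-write decision: punch out leftover delalloc
 * blocks only when this write created them (the NEW flag is set).
 * Flag value and helper name are illustrative. */
#include <stdbool.h>
#include <stdio.h>

#define IOMAP_F_NEW	0x01	/* blocks were freshly allocated */

static bool should_punch_delalloc(unsigned int flags, long long start_fsb,
				  long long end_fsb)
{
	return (flags & IOMAP_F_NEW) && start_fsb < end_fsb;
}

int main(void)
{
	/* Short write over blocks this write allocated: clean them up. */
	printf("%d\n", should_punch_delalloc(IOMAP_F_NEW, 4, 8));
	/* Short write over someone else's delalloc: leave them alone. */
	printf("%d\n", should_punch_delalloc(0, 4, 8));
	return 0;
}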
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -361,7 +361,6 @@ xfs_bulkstat(
 	xfs_agino_t		agino;	/* inode # in allocation group */
 	xfs_agnumber_t		agno;	/* allocation group number */
 	xfs_btree_cur_t		*cur;	/* btree cursor for ialloc btree */
-	size_t			irbsize; /* size of irec buffer in bytes */
 	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
 	int			nirbuf;	/* size of irbuf */
 	int			ubcount; /* size of user's buffer */
@@ -388,11 +387,10 @@ xfs_bulkstat(
 	*ubcountp = 0;
 	*done = 0;

-	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
+	irbuf = kmem_zalloc_large(PAGE_SIZE * 4, KM_SLEEP);
 	if (!irbuf)
 		return -ENOMEM;
-
-	nirbuf = irbsize / sizeof(*irbuf);
+	nirbuf = (PAGE_SIZE * 4) / sizeof(*irbuf);

 	/*
 	 * Loop over the allocation groups, starting from the last
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -513,8 +513,7 @@ STATIC void
 xfs_set_inoalignment(xfs_mount_t *mp)
 {
 	if (xfs_sb_version_hasalign(&mp->m_sb) &&
-	    mp->m_sb.sb_inoalignmt >=
-	    XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
+	    mp->m_sb.sb_inoalignmt >= xfs_icluster_size_fsb(mp))
 		mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
 	else
 		mp->m_inoalign_mask = 0;
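The geometry bug this fixes: XFS_B_TO_FSBT converts bytes to blocks with a truncating shift, so when the filesystem block size exceeds the inode cluster size the right-hand side becomes zero and the comparison is trivially true, setting the alignment mask even when the alignment is insufficient. xfs_icluster_size_fsb() instead clamps to at least one block. A worked example of the arithmetic, with illustrative constants:

/* Worked example: truncating vs. rounded-up conversion of the inode
 * cluster size to filesystem blocks. Constants are illustrative. */
#include <stdio.h>

int main(void)
{
	unsigned int blocklog = 16;		/* 64k filesystem blocks */
	unsigned int cluster_bytes = 8192;	/* 8k inode cluster */
	unsigned int inoalignmt = 4;		/* from the superblock */

	/* Old: the truncating shift gives 0, so any inoalignmt passes. */
	unsigned int old_fsb = cluster_bytes >> blocklog;

	/* New: clamp to at least one block, as xfs_icluster_size_fsb()
	 * does when the block size is >= the inode cluster size. */
	unsigned int new_fsb = old_fsb ? old_fsb : 1;

	printf("old check: %u >= %u -> %d (always true, bogus)\n",
	       inoalignmt, old_fsb, inoalignmt >= old_fsb);
	printf("new check: %u >= %u -> %d\n",
	       inoalignmt, new_fsb, inoalignmt >= new_fsb);
	return 0;
}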
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -548,14 +548,18 @@ xfs_reflink_trim_irec_to_next_cow(
 }

 /*
- * Cancel all pending CoW reservations for some block range of an inode.
+ * Cancel CoW reservations for some block range of an inode.
+ *
+ * If cancel_real is true this function cancels all COW fork extents for the
+ * inode; if cancel_real is false, real extents are not cleared.
  */
 int
 xfs_reflink_cancel_cow_blocks(
 	struct xfs_inode		*ip,
 	struct xfs_trans		**tpp,
 	xfs_fileoff_t			offset_fsb,
-	xfs_fileoff_t			end_fsb)
+	xfs_fileoff_t			end_fsb,
+	bool				cancel_real)
 {
 	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
 	struct xfs_bmbt_irec		got, del;
@@ -579,7 +583,7 @@ xfs_reflink_cancel_cow_blocks(
 					&idx, &got, &del);
 			if (error)
 				break;
-		} else {
+		} else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
 			xfs_trans_ijoin(*tpp, ip, 0);
 			xfs_defer_init(&dfops, &firstfsb);

@@ -621,13 +625,17 @@ xfs_reflink_cancel_cow_blocks(
 }

 /*
- * Cancel all pending CoW reservations for some byte range of an inode.
+ * Cancel CoW reservations for some byte range of an inode.
+ *
+ * If cancel_real is true this function cancels all COW fork extents for the
+ * inode; if cancel_real is false, real extents are not cleared.
  */
 int
 xfs_reflink_cancel_cow_range(
 	struct xfs_inode	*ip,
 	xfs_off_t		offset,
-	xfs_off_t		count)
+	xfs_off_t		count,
+	bool			cancel_real)
 {
 	struct xfs_trans	*tp;
 	xfs_fileoff_t		offset_fsb;
@@ -653,7 +661,8 @@ xfs_reflink_cancel_cow_range(
 	xfs_trans_ijoin(tp, ip, 0);

 	/* Scrape out the old CoW reservations */
-	error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb);
+	error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb,
+			cancel_real);
 	if (error)
 		goto out_cancel;

@@ -1450,7 +1459,7 @@ next:
 	 * We didn't find any shared blocks so turn off the reflink flag.
 	 * First, get rid of any leftover CoW mappings.
 	 */
-	error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF, true);
+	error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF, true);
 	if (error)
 		return error;

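The new cancel_real flag is what makes the periodic cowblocks garbage collection less aggressive: the background worker passes false and reclaims only unwritten (still speculative) COW fork extents, while full teardown paths such as truncate, inode eviction, and reflink-flag clearing pass true and clear everything. A small predicate model of the per-extent filter:

/* Model of the cancel_real filter applied per COW fork extent:
 * unwritten (speculative) reservations are always reclaimable, real
 * extents only when the caller asked for a full teardown. */
#include <stdbool.h>
#include <stdio.h>

enum ext_state { EXT_NORM, EXT_UNWRITTEN };

static bool should_cancel(enum ext_state state, bool cancel_real)
{
	return state == EXT_UNWRITTEN || cancel_real;
}

int main(void)
{
	/* Periodic reclaim (cancel_real == false): keep real extents. */
	printf("%d\n", should_cancel(EXT_NORM, false));		/* 0 */
	printf("%d\n", should_cancel(EXT_UNWRITTEN, false));	/* 1 */
	/* Truncate or inode eviction (cancel_real == true): clear all. */
	printf("%d\n", should_cancel(EXT_NORM, true));		/* 1 */
	return 0;
}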
--- a/fs/xfs/xfs_reflink.h
+++ b/fs/xfs/xfs_reflink.h
@@ -39,9 +39,9 @@ extern void xfs_reflink_trim_irec_to_next_cow(struct xfs_inode *ip,

 extern int xfs_reflink_cancel_cow_blocks(struct xfs_inode *ip,
 		struct xfs_trans **tpp, xfs_fileoff_t offset_fsb,
-		xfs_fileoff_t end_fsb);
+		xfs_fileoff_t end_fsb, bool cancel_real);
 extern int xfs_reflink_cancel_cow_range(struct xfs_inode *ip, xfs_off_t offset,
-		xfs_off_t count);
+		xfs_off_t count, bool cancel_real);
 extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset,
 		xfs_off_t count);
 extern int xfs_reflink_recover_cow(struct xfs_mount *mp);
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -953,7 +953,7 @@ xfs_fs_destroy_inode(
 	XFS_STATS_INC(ip->i_mount, vn_remove);

 	if (xfs_is_reflink_inode(ip)) {
-		error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF);
+		error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
 		if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount))
 			xfs_warn(ip->i_mount,
 "Error %d while evicting CoW blocks for inode %llu.",
|
Loading…
Reference in New Issue
Block a user