2018-06-06 02:42:14 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
2006-06-09 04:48:12 +00:00
|
|
|
* Copyright (c) 2000-2006 Silicon Graphics, Inc.
|
2018-07-12 05:26:06 +00:00
|
|
|
* Copyright (c) 2016-2018 Christoph Hellwig.
|
2005-11-02 03:58:39 +00:00
|
|
|
* All Rights Reserved.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
|
|
|
#include "xfs.h"
|
|
|
|
#include "xfs_fs.h"
|
2013-10-22 23:36:05 +00:00
|
|
|
#include "xfs_shared.h"
|
2013-10-22 23:50:10 +00:00
|
|
|
#include "xfs_format.h"
|
|
|
|
#include "xfs_log_format.h"
|
|
|
|
#include "xfs_trans_resv.h"
|
2005-04-16 22:20:36 +00:00
|
|
|
#include "xfs_mount.h"
|
|
|
|
#include "xfs_inode.h"
|
2005-11-02 03:38:42 +00:00
|
|
|
#include "xfs_btree.h"
|
2013-10-22 23:51:50 +00:00
|
|
|
#include "xfs_bmap_btree.h"
|
2005-04-16 22:20:36 +00:00
|
|
|
#include "xfs_bmap.h"
|
2013-08-12 10:49:42 +00:00
|
|
|
#include "xfs_bmap_util.h"
|
2017-10-31 19:04:49 +00:00
|
|
|
#include "xfs_errortag.h"
|
2005-04-16 22:20:36 +00:00
|
|
|
#include "xfs_error.h"
|
2013-10-22 23:51:50 +00:00
|
|
|
#include "xfs_trans.h"
|
2005-04-16 22:20:36 +00:00
|
|
|
#include "xfs_trans_space.h"
|
2017-11-01 15:36:47 +00:00
|
|
|
#include "xfs_inode_item.h"
|
2005-04-16 22:20:36 +00:00
|
|
|
#include "xfs_iomap.h"
|
2009-12-14 23:14:59 +00:00
|
|
|
#include "xfs_trace.h"
|
2013-10-22 23:51:50 +00:00
|
|
|
#include "xfs_quota.h"
|
2013-03-18 14:51:47 +00:00
|
|
|
#include "xfs_dquot_item.h"
|
|
|
|
#include "xfs_dquot.h"
|
2016-10-03 16:11:33 +00:00
|
|
|
#include "xfs_reflink.h"
|
2024-02-22 20:31:51 +00:00
|
|
|
#include "xfs_health.h"
|
2024-04-22 11:20:16 +00:00
|
|
|
#include "xfs_rtbitmap.h"
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2019-10-28 15:41:44 +00:00
|
|
|
/*
 * Round @off down to a multiple of the mount's speculative preallocation
 * unit (1 << m_allocsize_log filesystem blocks).
 */
#define XFS_ALLOC_ALIGN(mp, off) \
	(((off) >> mp->m_allocsize_log) << mp->m_allocsize_log)
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2019-02-18 17:38:46 +00:00
|
|
|
/*
 * Complain loudly about a mapping that references block zero (never a valid
 * data block), record the data fork as sick for the health tracking code,
 * and hand back -EFSCORRUPTED for the caller to propagate.
 */
static int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
			(unsigned long long)ip->i_ino,
			(unsigned long long)imap->br_startblock,
			(unsigned long long)imap->br_startoff,
			(unsigned long long)imap->br_blockcount,
			imap->br_state);
	/* Flag the corruption so online fsck knows the data fork is bad. */
	xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
	return -EFSCORRUPTED;
}
|
|
|
|
|
2022-11-28 22:09:17 +00:00
|
|
|
u64
|
|
|
|
xfs_iomap_inode_sequence(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
u16 iomap_flags)
|
|
|
|
{
|
|
|
|
u64 cookie = 0;
|
|
|
|
|
|
|
|
if (iomap_flags & IOMAP_F_XATTR)
|
|
|
|
return READ_ONCE(ip->i_af.if_seq);
|
|
|
|
if ((iomap_flags & IOMAP_F_SHARED) && ip->i_cowfp)
|
|
|
|
cookie = (u64)READ_ONCE(ip->i_cowfp->if_seq) << 32;
|
|
|
|
return cookie | READ_ONCE(ip->i_df.if_seq);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Check that the iomap passed to us is still valid for the given offset and
 * length.
 *
 * The validity_cookie stashed in the iomap at creation time is compared
 * against the current fork sequence number(s); a mismatch means the extent
 * map was modified underneath us and the cached mapping must not be used.
 */
static bool
xfs_iomap_valid(
	struct inode		*inode,
	const struct iomap	*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);

	if (iomap->validity_cookie !=
			xfs_iomap_inode_sequence(ip, iomap->flags)) {
		trace_xfs_iomap_invalid(ip, iomap);
		return false;
	}

	/* Error-injection hook: optionally delay here to widen race windows. */
	XFS_ERRORTAG_DELAY(ip->i_mount, XFS_ERRTAG_WRITE_DELAY_MS);
	return true;
}
|
|
|
|
|
2023-01-15 16:50:44 +00:00
|
|
|
/*
 * Folio operations handed to the iomap layer so it can revalidate a cached
 * mapping (via xfs_iomap_valid) before acting on it.
 */
static const struct iomap_folio_ops xfs_iomap_folio_ops = {
	.iomap_valid		= xfs_iomap_valid,
};
|
|
|
|
|
2019-02-18 17:38:46 +00:00
|
|
|
/*
 * Translate an XFS bmap extent record (@imap) into the generic struct iomap
 * (@iomap) consumed by the VFS iomap infrastructure.
 *
 * @mapping_flags: IOMAP_* flags of the current operation (only IOMAP_DAX is
 *	examined here, to pick the DAX device/offset).
 * @iomap_flags: IOMAP_F_* flags to set on the resulting mapping.
 * @sequence_cookie: fork sequence number stored in the iomap so that
 *	xfs_iomap_valid() can later detect a stale mapping.
 *
 * Returns 0 on success or -EFSCORRUPTED if the extent references block zero.
 */
int
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap,
	unsigned int		mapping_flags,
	u16			iomap_flags,
	u64			sequence_cookie)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);

	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) {
		xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
		return xfs_alert_fsblock_zero(ip, imap);
	}

	if (imap->br_startblock == HOLESTARTBLOCK) {
		/* No blocks allocated at all. */
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK ||
		   isnullstartblock(imap->br_startblock)) {
		/* Delayed allocation: space reserved but no blocks yet. */
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_DELALLOC;
	} else {
		/* Real blocks: convert the fsblock to a disk byte address. */
		iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock));
		if (mapping_flags & IOMAP_DAX)
			iomap->addr += target->bt_dax_part_off;

		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;

	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	if (mapping_flags & IOMAP_DAX)
		iomap->dax_dev = target->bt_daxdev;
	else
		iomap->bdev = target->bt_bdev;
	iomap->flags = iomap_flags;

	/*
	 * A pinned inode with non-timestamp dirty log items still needs a log
	 * flush before data integrity is guaranteed; tell iomap about it.
	 */
	if (xfs_ipincount(ip) &&
	    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		iomap->flags |= IOMAP_F_DIRTY;

	iomap->validity_cookie = sequence_cookie;
	iomap->folio_ops = &xfs_iomap_folio_ops;
	return 0;
}
|
|
|
|
|
2018-10-18 06:19:26 +00:00
|
|
|
static void
|
|
|
|
xfs_hole_to_iomap(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
struct iomap *iomap,
|
|
|
|
xfs_fileoff_t offset_fsb,
|
|
|
|
xfs_fileoff_t end_fsb)
|
|
|
|
{
|
2019-10-25 05:25:38 +00:00
|
|
|
struct xfs_buftarg *target = xfs_inode_buftarg(ip);
|
|
|
|
|
2018-10-18 06:19:26 +00:00
|
|
|
iomap->addr = IOMAP_NULL_ADDR;
|
|
|
|
iomap->type = IOMAP_HOLE;
|
|
|
|
iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
|
|
|
|
iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
|
2019-10-25 05:25:38 +00:00
|
|
|
iomap->bdev = target->bt_bdev;
|
|
|
|
iomap->dax_dev = target->bt_daxdev;
|
2018-10-18 06:19:26 +00:00
|
|
|
}
|
|
|
|
|
2019-10-19 16:09:44 +00:00
|
|
|
static inline xfs_fileoff_t
|
|
|
|
xfs_iomap_end_fsb(
|
|
|
|
struct xfs_mount *mp,
|
|
|
|
loff_t offset,
|
|
|
|
loff_t count)
|
|
|
|
{
|
|
|
|
ASSERT(offset <= mp->m_super->s_maxbytes);
|
|
|
|
return min(XFS_B_TO_FSB(mp, offset + count),
|
|
|
|
XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
|
|
|
|
}
|
|
|
|
|
2019-10-30 19:24:57 +00:00
|
|
|
static xfs_extlen_t
|
2016-09-19 01:09:28 +00:00
|
|
|
xfs_eof_alignment(
|
2019-10-30 19:24:58 +00:00
|
|
|
struct xfs_inode *ip)
|
2006-01-11 04:28:28 +00:00
|
|
|
{
|
2016-09-19 01:09:28 +00:00
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
|
|
|
xfs_extlen_t align = 0;
|
2006-01-11 04:28:28 +00:00
|
|
|
|
2011-12-18 20:00:05 +00:00
|
|
|
if (!XFS_IS_REALTIME_INODE(ip)) {
|
|
|
|
/*
|
|
|
|
* Round up the allocation request to a stripe unit
|
|
|
|
* (m_dalign) boundary if the file size is >= stripe unit
|
|
|
|
* size, and we are allocating past the allocation eof.
|
|
|
|
*
|
|
|
|
* If mounted with the "-o swalloc" option the alignment is
|
|
|
|
* increased from the strip unit size to the stripe width.
|
|
|
|
*/
|
2021-08-19 01:46:52 +00:00
|
|
|
if (mp->m_swidth && xfs_has_swalloc(mp))
|
2011-12-18 20:00:05 +00:00
|
|
|
align = mp->m_swidth;
|
|
|
|
else if (mp->m_dalign)
|
|
|
|
align = mp->m_dalign;
|
|
|
|
|
2014-12-03 22:30:51 +00:00
|
|
|
if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
|
|
|
|
align = 0;
|
2011-12-18 20:00:05 +00:00
|
|
|
}
|
2006-01-11 04:28:28 +00:00
|
|
|
|
2016-09-19 01:09:28 +00:00
|
|
|
return align;
|
|
|
|
}
|
|
|
|
|
2019-10-30 19:24:57 +00:00
|
|
|
/*
 * Check if last_fsb is outside the last extent, and if so grow it to the next
 * stripe unit boundary.
 *
 * Returns the (possibly) aligned ending block for the allocation request.
 * The rounded value is only used if it does not land inside the last data
 * extent, so we never align an allocation that already sits within mapped
 * space.  Caller must already have the extent list in memory.
 */
xfs_fileoff_t
xfs_iomap_eof_align_last_fsb(
	struct xfs_inode	*ip,
	xfs_fileoff_t		end_fsb)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
	xfs_extlen_t		extsz = xfs_get_extsz_hint(ip);
	xfs_extlen_t		align = xfs_eof_alignment(ip);
	struct xfs_bmbt_irec	irec;
	struct xfs_iext_cursor	icur;

	/* Extents must already be read in; we walk them without I/O below. */
	ASSERT(!xfs_need_iread_extents(ifp));

	/*
	 * Always round up the allocation request to the extent hint boundary.
	 */
	if (extsz) {
		if (align)
			align = roundup_64(align, extsz);
		else
			align = extsz;
	}

	if (align) {
		xfs_fileoff_t	aligned_end_fsb = roundup_64(end_fsb, align);

		/*
		 * Only use the aligned end if it lies at or beyond the end of
		 * the last extent (or if there are no extents at all).
		 */
		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &irec) ||
		    aligned_end_fsb >= irec.br_startoff + irec.br_blockcount)
			return aligned_end_fsb;
	}

	return end_fsb;
}
|
|
|
|
|
2010-12-10 08:42:20 +00:00
|
|
|
/*
 * Allocate blocks for the file range [@offset_fsb, @offset_fsb + @count_fsb)
 * in a single block mapping transaction.  On success *@imap describes the
 * resulting mapping and *@seq holds the data fork sequence cookie sampled
 * before the inode lock is dropped.
 *
 * NOTE(review): the ILOCK_EXCL released at out_unlock appears to be taken by
 * xfs_trans_alloc_inode() (early failures return before reaching it) —
 * confirm against that helper's locking contract.
 */
int
xfs_iomap_write_direct(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		count_fsb,
	unsigned int		flags,
	struct xfs_bmbt_irec	*imap,
	u64			*seq)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	xfs_filblks_t		resaligned;
	int			nimaps;
	unsigned int		dblocks, rblocks;
	bool			force = false;
	int			error;
	int			bmapi_flags = XFS_BMAPI_PREALLOC;
	int			nr_exts = XFS_IEXT_ADD_NOSPLIT_CNT;

	ASSERT(count_fsb > 0);

	/*
	 * Reserve enough space for the extsz-aligned allocation; realtime
	 * inodes take the data blocks from the rt device instead.
	 */
	resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb,
					   xfs_get_extsz_hint(ip));
	if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
		dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		rblocks = resaligned;
	} else {
		dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		rblocks = 0;
	}

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction. Ideally we'd like to do
	 * this outside the transaction context, but if we commit and then crash
	 * we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation. Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten extent
	 * callback for DAX. This also means that we need to be able to dip into
	 * the reserve block pool for bmbt block allocation if there is no space
	 * left but we need to do unwritten extent conversion.
	 */
	if (flags & IOMAP_DAX) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (imap->br_state == XFS_EXT_UNWRITTEN) {
			/* Double the reservation and allow reserve-pool use. */
			force = true;
			nr_exts = XFS_IEXT_WRITE_UNWRITTEN_CNT;
			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks,
			rblocks, force, &tp);
	if (error)
		return error;

	/* Make sure the in-core extent count can absorb the new records. */
	error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK, nr_exts);
	if (error)
		goto out_trans_cancel;

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flags, 0,
				imap, &nimaps);
	if (error)
		goto out_trans_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	/* Sanity check the returned mapping before handing it back. */
	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) {
		xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
		error = xfs_alert_fsblock_zero(ip, imap);
	}

out_unlock:
	/* Sample the sequence cookie while still holding the ILOCK. */
	*seq = xfs_iomap_inode_sequence(ip, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}
|
|
|
|
|
2013-03-18 14:51:47 +00:00
|
|
|
STATIC bool
|
|
|
|
xfs_quota_need_throttle(
|
2020-07-16 00:53:43 +00:00
|
|
|
struct xfs_inode *ip,
|
|
|
|
xfs_dqtype_t type,
|
|
|
|
xfs_fsblock_t alloc_blocks)
|
2013-03-18 14:51:47 +00:00
|
|
|
{
|
2020-07-16 00:53:43 +00:00
|
|
|
struct xfs_dquot *dq = xfs_inode_dquot(ip, type);
|
2013-03-18 14:51:47 +00:00
|
|
|
|
|
|
|
if (!dq || !xfs_this_quota_on(ip->i_mount, type))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* no hi watermark, no throttle */
|
|
|
|
if (!dq->q_prealloc_hi_wmark)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* under the lo watermark, no throttle */
|
2020-07-14 17:37:30 +00:00
|
|
|
if (dq->q_blk.reserved + alloc_blocks < dq->q_prealloc_lo_wmark)
|
2013-03-18 14:51:47 +00:00
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Fold this dquot's remaining headroom into the preallocation throttle
 * state.  *@qblocks/*@qshift track the most aggressive (smallest effective
 * preallocation) quota seen so far across all quota types; *@qfreesp is
 * clamped to the smallest free-space figure observed.
 */
STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_fsblock_t		*qblocks,
	int			*qshift,
	int64_t			*qfreesp)
{
	struct xfs_dquot	*dq = xfs_inode_dquot(ip, type);
	int64_t			freesp;
	int			shift = 0;

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_blk.reserved >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	/*
	 * Headroom below the hi watermark; each low-space threshold crossed
	 * doubles the throttle twice (shift grows by 2).
	 */
	freesp = dq->q_prealloc_hi_wmark - dq->q_blk.reserved;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}
|
|
|
|
|
2024-04-22 11:20:16 +00:00
|
|
|
static int64_t
|
|
|
|
xfs_iomap_freesp(
|
|
|
|
struct percpu_counter *counter,
|
|
|
|
uint64_t low_space[XFS_LOWSP_MAX],
|
|
|
|
int *shift)
|
|
|
|
{
|
|
|
|
int64_t freesp;
|
|
|
|
|
|
|
|
freesp = percpu_counter_read_positive(counter);
|
|
|
|
if (freesp < low_space[XFS_LOWSP_5_PCNT]) {
|
|
|
|
*shift = 2;
|
|
|
|
if (freesp < low_space[XFS_LOWSP_4_PCNT])
|
|
|
|
(*shift)++;
|
|
|
|
if (freesp < low_space[XFS_LOWSP_3_PCNT])
|
|
|
|
(*shift)++;
|
|
|
|
if (freesp < low_space[XFS_LOWSP_2_PCNT])
|
|
|
|
(*shift)++;
|
|
|
|
if (freesp < low_space[XFS_LOWSP_1_PCNT])
|
|
|
|
(*shift)++;
|
|
|
|
}
|
|
|
|
return freesp;
|
|
|
|
}
|
|
|
|
|
2011-01-04 00:35:03 +00:00
|
|
|
/*
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full.  The closer the
 * filesystem is to being full, the smaller the maximum preallocation.
 *
 * Returns the number of filesystem blocks to speculatively preallocate, or 0
 * if no preallocation should be done at all.
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset,
	loff_t			count,
	struct xfs_iext_cursor	*icur)
{
	/* Work on a copy so the caller's extent cursor is not disturbed. */
	struct xfs_iext_cursor	ncur = *icur;
	struct xfs_bmbt_irec	prev, got;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	xfs_fsblock_t		alloc_blocks = 0;
	xfs_extlen_t		plen;
	int			shift = 0;
	int			qshift = 0;

	/*
	 * As an exception we don't do any preallocation at all if the file is
	 * smaller than the minimum preallocation and we are using the default
	 * dynamic preallocation scheme, as it is likely this is the only write
	 * to the file that is going to be done.
	 */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks))
		return 0;

	/*
	 * Use the minimum preallocation size for small files or if we are
	 * writing right after a hole.
	 */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
	    !xfs_iext_prev_extent(ifp, &ncur, &prev) ||
	    prev.br_startoff + prev.br_blockcount < offset_fsb)
		return mp->m_allocsize_blocks;

	/*
	 * Take the size of the preceding data extents as the basis for the
	 * preallocation size.  Note that we don't care if the previous extents
	 * are written or not.  Walk backwards through physically and logically
	 * contiguous extents, stopping at a delalloc reservation or a
	 * discontiguity.
	 */
	plen = prev.br_blockcount;
	while (xfs_iext_prev_extent(ifp, &ncur, &got)) {
		if (plen > XFS_MAX_BMBT_EXTLEN / 2 ||
		    isnullstartblock(got.br_startblock) ||
		    got.br_startoff + got.br_blockcount != prev.br_startoff ||
		    got.br_startblock + got.br_blockcount != prev.br_startblock)
			break;
		plen += got.br_blockcount;
		prev = got;
	}

	/*
	 * If the size of the extents is greater than half the maximum extent
	 * length, then use the current offset as the basis.  This ensures that
	 * for large files the preallocation size always extends to
	 * XFS_BMBT_MAX_EXTLEN rather than falling short due to things like
	 * stripe unit/width alignment of real extents.
	 */
	alloc_blocks = plen * 2;
	if (alloc_blocks > XFS_MAX_BMBT_EXTLEN)
		alloc_blocks = XFS_B_TO_FSB(mp, offset);
	qblocks = alloc_blocks;

	/*
	 * XFS_BMBT_MAX_EXTLEN is not a power of two value but we round the
	 * prealloc down to the nearest power of two value after throttling.
	 * To prevent the round down from unconditionally reducing the maximum
	 * supported prealloc size, we round up first, apply appropriate
	 * throttling, round down and cap the value to XFS_BMBT_MAX_EXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(XFS_MAX_BMBT_EXTLEN),
				       alloc_blocks);

	/* Realtime inodes throttle against the rt extent counters instead. */
	if (unlikely(XFS_IS_REALTIME_INODE(ip)))
		freesp = xfs_rtx_to_rtb(mp,
			xfs_iomap_freesp(&mp->m_frextents,
					mp->m_low_rtexts, &shift));
	else
		freesp = xfs_iomap_freesp(&mp->m_fdblocks, mp->m_low_space,
				&shift);

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = min(alloc_blocks, qblocks);
	shift = max(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > XFS_MAX_BMBT_EXTLEN)
		alloc_blocks = XFS_MAX_BMBT_EXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard.  This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than XFS_BMBT_MAX_EXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;
	/* Never go below the configured/default minimum preallocation. */
	if (alloc_blocks < mp->m_allocsize_blocks)
		alloc_blocks = mp->m_allocsize_blocks;
	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_allocsize_blocks);
	return alloc_blocks;
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * Convert the unwritten extents covering [offset, offset + count) to written
 * state, looping one bmap transaction at a time until the whole byte range
 * has been converted.  If @update_isize is set, the in-core inode size is
 * pushed forward (up to the end of the written range) as conversions are
 * logged, so the size update and the extent conversion hit the log together.
 *
 * Returns 0 on success or a negative errno.
 */
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count,
	bool		update_isize)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t imap;
	struct inode	*inode = VFS_I(ip);
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	/* Round the range out to whole filesystem blocks. */
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	/* Attach dquots so that bmbt splits are accounted correctly. */
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real.  Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here as we might be asked to write out the same inode that we
		 * complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks,
				0, true, &tp);
		if (error)
			return error;

		/* Ensure the in-core extent count can absorb the new extents. */
		error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
				XFS_IEXT_WRITE_UNWRITTEN_CNT);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, resblks, &imap,
					&nimaps);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;
		if (update_isize && i_size > i_size_read(inode))
			i_size_write(inode, i_size);
		/* xfs_new_eof() returns 0 if the on-disk size needs no update. */
		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_disk_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (unlikely(!xfs_valid_startblock(ip, imap.br_startblock))) {
			xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
			return xfs_alert_fsblock_zero(ip, &imap);
		}

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		/* Advance past the blocks converted this iteration. */
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
|
2016-06-20 23:52:47 +00:00
|
|
|
|
2018-05-02 19:54:54 +00:00
|
|
|
static inline bool
|
|
|
|
imap_needs_alloc(
|
|
|
|
struct inode *inode,
|
2019-10-19 16:09:47 +00:00
|
|
|
unsigned flags,
|
2018-05-02 19:54:54 +00:00
|
|
|
struct xfs_bmbt_irec *imap,
|
|
|
|
int nimaps)
|
2016-06-20 23:53:44 +00:00
|
|
|
{
|
2019-10-19 16:09:47 +00:00
|
|
|
/* don't allocate blocks when just zeroing */
|
|
|
|
if (flags & IOMAP_ZERO)
|
|
|
|
return false;
|
|
|
|
if (!nimaps ||
|
|
|
|
imap->br_startblock == HOLESTARTBLOCK ||
|
|
|
|
imap->br_startblock == DELAYSTARTBLOCK)
|
|
|
|
return true;
|
|
|
|
/* we convert unwritten extents before copying the data for DAX */
|
2021-11-29 10:21:58 +00:00
|
|
|
if ((flags & IOMAP_DAX) && imap->br_state == XFS_EXT_UNWRITTEN)
|
2019-10-19 16:09:47 +00:00
|
|
|
return true;
|
|
|
|
return false;
|
2016-06-20 23:53:44 +00:00
|
|
|
}
|
|
|
|
|
2018-05-02 19:54:54 +00:00
|
|
|
static inline bool
|
2019-10-19 16:09:47 +00:00
|
|
|
imap_needs_cow(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
unsigned int flags,
|
2018-05-02 19:54:54 +00:00
|
|
|
struct xfs_bmbt_irec *imap,
|
|
|
|
int nimaps)
|
2018-03-01 22:10:31 +00:00
|
|
|
{
|
2019-10-19 16:09:47 +00:00
|
|
|
if (!xfs_is_cow_inode(ip))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* when zeroing we don't have to COW holes or unwritten extents */
|
2024-10-03 15:09:01 +00:00
|
|
|
if (flags & (IOMAP_UNSHARE | IOMAP_ZERO)) {
|
2019-10-19 16:09:47 +00:00
|
|
|
if (!nimaps ||
|
|
|
|
imap->br_startblock == HOLESTARTBLOCK ||
|
|
|
|
imap->br_state == XFS_EXT_UNWRITTEN)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
2018-03-01 22:10:31 +00:00
|
|
|
}
|
|
|
|
|
2024-06-23 05:44:26 +00:00
|
|
|
/*
|
|
|
|
* Extents not yet cached requires exclusive access, don't block for
|
|
|
|
* IOMAP_NOWAIT.
|
|
|
|
*
|
|
|
|
* This is basically an opencoded xfs_ilock_data_map_shared() call, but with
|
|
|
|
* support for IOMAP_NOWAIT.
|
|
|
|
*/
|
2018-05-02 19:54:54 +00:00
|
|
|
static int
|
|
|
|
xfs_ilock_for_iomap(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
unsigned flags,
|
|
|
|
unsigned *lockmode)
|
2016-11-30 03:37:15 +00:00
|
|
|
{
|
2018-05-02 19:54:54 +00:00
|
|
|
if (flags & IOMAP_NOWAIT) {
|
2024-06-23 05:44:26 +00:00
|
|
|
if (xfs_need_iread_extents(&ip->i_df))
|
|
|
|
return -EAGAIN;
|
|
|
|
if (!xfs_ilock_nowait(ip, *lockmode))
|
2018-05-02 19:54:54 +00:00
|
|
|
return -EAGAIN;
|
|
|
|
} else {
|
2024-06-23 05:44:26 +00:00
|
|
|
if (xfs_need_iread_extents(&ip->i_df))
|
|
|
|
*lockmode = XFS_ILOCK_EXCL;
|
|
|
|
xfs_ilock(ip, *lockmode);
|
2018-05-02 19:54:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2016-11-30 03:37:15 +00:00
|
|
|
}
|
|
|
|
|
2020-11-19 16:59:11 +00:00
|
|
|
/*
|
|
|
|
* Check that the imap we are going to return to the caller spans the entire
|
|
|
|
* range that the caller requested for the IO.
|
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
imap_spans_range(
|
|
|
|
struct xfs_bmbt_irec *imap,
|
|
|
|
xfs_fileoff_t offset_fsb,
|
|
|
|
xfs_fileoff_t end_fsb)
|
|
|
|
{
|
|
|
|
if (imap->br_startoff > offset_fsb)
|
|
|
|
return false;
|
|
|
|
if (imap->br_startoff + imap->br_blockcount < end_fsb)
|
|
|
|
return false;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-10-19 16:09:46 +00:00
|
|
|
/*
 * iomap_begin handler for direct (and DAX) writes and zeroing.
 *
 * Maps [offset, offset + length) for a write, allocating real blocks or
 * setting up COW staging extents as needed.  NOWAIT and OVERWRITE_ONLY
 * callers get -EAGAIN for anything that would block or require allocation/
 * unwritten conversion, so they can fall back to a slower path.
 */
static int
xfs_direct_write_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap, cmap;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, length);
	int			nimaps = 1, error = 0;
	bool			shared = false;
	u16			iomap_flags = 0;
	unsigned int		lockmode;
	u64			seq;

	ASSERT(flags & (IOMAP_WRITE | IOMAP_ZERO));

	if (xfs_is_shutdown(mp))
		return -EIO;

	/*
	 * Writes that span EOF might trigger an IO size update on completion,
	 * so consider them to be dirty for the purposes of O_DSYNC even if
	 * there is no other metadata changes pending or have been made here.
	 */
	if (offset + length > i_size_read(inode))
		iomap_flags |= IOMAP_F_DIRTY;

	/*
	 * COW writes may allocate delalloc space or convert unwritten COW
	 * extents, so we need to make sure to take the lock exclusively here.
	 */
	if (xfs_is_cow_inode(ip))
		lockmode = XFS_ILOCK_EXCL;
	else
		lockmode = XFS_ILOCK_SHARED;

relock:
	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;

	/*
	 * The reflink iflag could have changed since the earlier unlocked
	 * check; check it again and relock exclusively if needed.
	 */
	if (xfs_is_cow_inode(ip) && lockmode == XFS_ILOCK_SHARED) {
		xfs_iunlock(ip, lockmode);
		lockmode = XFS_ILOCK_EXCL;
		goto relock;
	}

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (error)
		goto out_unlock;

	if (imap_needs_cow(ip, flags, &imap, nimaps)) {
		/* COW setup may block, so NOWAIT callers must retry. */
		error = -EAGAIN;
		if (flags & IOMAP_NOWAIT)
			goto out_unlock;

		/* may drop and re-acquire the ilock */
		error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared,
				&lockmode,
				(flags & IOMAP_DIRECT) || IS_DAX(inode));
		if (error)
			goto out_unlock;
		if (shared)
			goto out_found_cow;
		/* Trim the request to the COW-adjusted data mapping. */
		end_fsb = imap.br_startoff + imap.br_blockcount;
		length = XFS_FSB_TO_B(mp, end_fsb) - offset;
	}

	if (imap_needs_alloc(inode, flags, &imap, nimaps))
		goto allocate_blocks;

	/*
	 * NOWAIT and OVERWRITE I/O needs to span the entire requested I/O with
	 * a single map so that we avoid partial IO failures due to the rest of
	 * the I/O range not covered by this map triggering an EAGAIN condition
	 * when it is subsequently mapped and aborting the I/O.
	 */
	if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY)) {
		error = -EAGAIN;
		if (!imap_spans_range(&imap, offset_fsb, end_fsb))
			goto out_unlock;
	}

	/*
	 * For overwrite only I/O, we cannot convert unwritten extents without
	 * requiring sub-block zeroing.  This can only be done under an
	 * exclusive IOLOCK, hence return -EAGAIN if this is not a written
	 * extent to tell the caller to try again.
	 */
	if (flags & IOMAP_OVERWRITE_ONLY) {
		error = -EAGAIN;
		if (imap.br_state != XFS_EXT_NORM &&
	            ((offset | length) & mp->m_blockmask))
			goto out_unlock;
	}

	/* Found a usable written mapping: hand it back. */
	seq = xfs_iomap_inode_sequence(ip, iomap_flags);
	xfs_iunlock(ip, lockmode);
	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags, seq);

allocate_blocks:
	error = -EAGAIN;
	if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY))
		goto out_unlock;

	/*
	 * We cap the maximum length we map to a sane size to keep the chunks
	 * of work done where somewhat symmetric with the work writeback does.
	 * This is a completely arbitrary number pulled out of thin air as a
	 * best guess for initial testing.
	 *
	 * Note that the values needs to be less than 32-bits wide until the
	 * lower level functions are updated.
	 */
	length = min_t(loff_t, length, 1024 * PAGE_SIZE);
	end_fsb = xfs_iomap_end_fsb(mp, offset, length);

	if (offset + length > XFS_ISIZE(ip))
		end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
	else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
		end_fsb = min(end_fsb, imap.br_startoff + imap.br_blockcount);
	xfs_iunlock(ip, lockmode);

	error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb,
			flags, &imap, &seq);
	if (error)
		return error;

	trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
				 iomap_flags | IOMAP_F_NEW, seq);

out_found_cow:
	/* Shared extent: return the COW staging mapping, data fork as srcmap. */
	length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
	trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
	if (imap.br_startblock != HOLESTARTBLOCK) {
		seq = xfs_iomap_inode_sequence(ip, 0);
		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0, seq);
		if (error)
			goto out_unlock;
	}
	seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
	xfs_iunlock(ip, lockmode);
	return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED, seq);

out_unlock:
	if (lockmode)
		xfs_iunlock(ip, lockmode);
	return error;
}
|
|
|
|
|
2019-10-19 16:09:46 +00:00
|
|
|
/* iomap_ops for direct I/O writes: mapping setup only, no end hook needed. */
const struct iomap_ops xfs_direct_write_iomap_ops = {
	.iomap_begin		= xfs_direct_write_iomap_begin,
};
|
|
|
|
|
2022-06-03 05:37:37 +00:00
|
|
|
static int
|
|
|
|
xfs_dax_write_iomap_end(
|
|
|
|
struct inode *inode,
|
|
|
|
loff_t pos,
|
|
|
|
loff_t length,
|
|
|
|
ssize_t written,
|
|
|
|
unsigned flags,
|
|
|
|
struct iomap *iomap)
|
|
|
|
{
|
|
|
|
struct xfs_inode *ip = XFS_I(inode);
|
|
|
|
|
|
|
|
if (!xfs_is_cow_inode(ip))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!written) {
|
|
|
|
xfs_reflink_cancel_cow_range(ip, pos, length, true);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return xfs_reflink_end_cow(ip, pos, written);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* iomap_ops for DAX writes: shared begin handler plus a COW end hook. */
const struct iomap_ops xfs_dax_write_iomap_ops = {
	.iomap_begin	= xfs_direct_write_iomap_begin,
	.iomap_end	= xfs_dax_write_iomap_end,
};
|
|
|
|
|
2019-10-19 16:09:46 +00:00
|
|
|
static int
|
2019-10-19 16:09:46 +00:00
|
|
|
xfs_buffered_write_iomap_begin(
|
2019-10-19 16:09:46 +00:00
|
|
|
struct inode *inode,
|
|
|
|
loff_t offset,
|
|
|
|
loff_t count,
|
|
|
|
unsigned flags,
|
|
|
|
struct iomap *iomap,
|
|
|
|
struct iomap *srcmap)
|
|
|
|
{
|
|
|
|
struct xfs_inode *ip = XFS_I(inode);
|
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
|
|
|
xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
|
|
|
|
xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, count);
|
|
|
|
struct xfs_bmbt_irec imap, cmap;
|
|
|
|
struct xfs_iext_cursor icur, ccur;
|
|
|
|
xfs_fsblock_t prealloc_blocks = 0;
|
|
|
|
bool eof = false, cow_eof = false, shared = false;
|
2019-10-19 16:09:47 +00:00
|
|
|
int allocfork = XFS_DATA_FORK;
|
2019-10-19 16:09:46 +00:00
|
|
|
int error = 0;
|
2022-06-23 17:51:57 +00:00
|
|
|
unsigned int lockmode = XFS_ILOCK_EXCL;
|
2024-10-08 08:59:19 +00:00
|
|
|
unsigned int iomap_flags = 0;
|
2022-11-28 22:09:17 +00:00
|
|
|
u64 seq;
|
2019-10-19 16:09:46 +00:00
|
|
|
|
2021-08-19 01:46:53 +00:00
|
|
|
if (xfs_is_shutdown(mp))
|
2021-02-11 01:27:20 +00:00
|
|
|
return -EIO;
|
|
|
|
|
2019-10-19 16:09:46 +00:00
|
|
|
/* we can't use delayed allocations when using extent size hints */
|
|
|
|
if (xfs_get_extsz_hint(ip))
|
|
|
|
return xfs_direct_write_iomap_begin(inode, offset, count,
|
|
|
|
flags, iomap, srcmap);
|
|
|
|
|
xfs: attach dquots to inode before reading data/cow fork mappings
I've been running near-continuous integration testing of online fsck,
and I've noticed that once a day, one of the ARM VMs will fail the test
with out of order records in the data fork.
xfs/804 races fsstress with online scrub (aka scan but do not change
anything), so I think this might be a bug in the core xfs code. This
also only seems to trigger if one runs the test for more than ~6 minutes
via TIME_FACTOR=13 or something.
https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfstests-dev.git/tree/tests/xfs/804?h=djwong-wtf
I added a debugging patch to the kernel to check the data fork extents
after taking the ILOCK, before dropping ILOCK, and before and after each
bmapping operation. So far I've narrowed it down to the delalloc code
inserting a record in the wrong place in the iext tree:
xfs_bmap_add_extent_hole_delay, near line 2691:
case 0:
/*
* New allocation is not contiguous with another
* delayed allocation.
* Insert a new entry.
*/
oldlen = newlen = 0;
xfs_iunlock_check_datafork(ip); <-- ok here
xfs_iext_insert(ip, icur, new, state);
xfs_iunlock_check_datafork(ip); <-- bad here
break;
}
I recorded the state of the data fork mappings and iext cursor state
when a corrupt data fork is detected immediately after the
xfs_bmap_add_extent_hole_delay call in xfs_bmapi_reserve_delalloc:
ino 0x140bb3 func xfs_bmapi_reserve_delalloc line 4164 data fork:
ino 0x140bb3 nr 0x0 nr_real 0x0 offset 0xb9 blockcount 0x1f startblock 0x935de2 state 1
ino 0x140bb3 nr 0x1 nr_real 0x1 offset 0xe6 blockcount 0xa startblock 0xffffffffe0007 state 0
ino 0x140bb3 nr 0x2 nr_real 0x1 offset 0xd8 blockcount 0xe startblock 0x935e01 state 0
Here we see that a delalloc extent was inserted into the wrong position
in the iext leaf, same as all the other times. The extra trace data I
collected are as follows:
ino 0x140bb3 fork 0 oldoff 0xe6 oldlen 0x4 oldprealloc 0x6 isize 0xe6000
ino 0x140bb3 oldgotoff 0xea oldgotstart 0xfffffffffffffffe oldgotcount 0x0 oldgotstate 0
ino 0x140bb3 crapgotoff 0x0 crapgotstart 0x0 crapgotcount 0x0 crapgotstate 0
ino 0x140bb3 freshgotoff 0xd8 freshgotstart 0x935e01 freshgotcount 0xe freshgotstate 0
ino 0x140bb3 nowgotoff 0xe6 nowgotstart 0xffffffffe0007 nowgotcount 0xa nowgotstate 0
ino 0x140bb3 oldicurpos 1 oldleafnr 2 oldleaf 0xfffffc00f0609a00
ino 0x140bb3 crapicurpos 2 crapleafnr 2 crapleaf 0xfffffc00f0609a00
ino 0x140bb3 freshicurpos 1 freshleafnr 2 freshleaf 0xfffffc00f0609a00
ino 0x140bb3 newicurpos 1 newleafnr 3 newleaf 0xfffffc00f0609a00
The first line shows that xfs_bmapi_reserve_delalloc was called with
whichfork=XFS_DATA_FORK, off=0xe6, len=0x4, prealloc=6.
The second line ("oldgot") shows the contents of @got at the beginning
of the call, which are the results of the first iext lookup in
xfs_buffered_write_iomap_begin.
Line 3 ("crapgot") is the result of duplicating the cursor at the start
of the body of xfs_bmapi_reserve_delalloc and performing a fresh lookup
at @off.
Line 4 ("freshgot") is the result of a new xfs_iext_get_extent right
before the call to xfs_bmap_add_extent_hole_delay. Totally garbage.
Line 5 ("nowgot") is contents of @got after the
xfs_bmap_add_extent_hole_delay call.
Line 6 is the contents of @icur at the beginning fo the call. Lines 7-9
are the contents of the iext cursors at the point where the block
mappings were sampled.
I think @oldgot is a HOLESTARTBLOCK extent because the first lookup
didn't find anything, so we filled in imap with "fake hole until the
end". At the time of the first lookup, I suspect that there's only one
32-block unwritten extent in the mapping (hence oldicurpos==1) but by
the time we get to recording crapgot, crapicurpos==2.
Dave then added:
Ok, that's much simpler to reason about, and implies the smoke is
coming from xfs_buffered_write_iomap_begin() or
xfs_bmapi_reserve_delalloc(). I suspect the former - it does a lot
of stuff with the ILOCK_EXCL held.....
.... including calling xfs_qm_dqattach_locked().
xfs_buffered_write_iomap_begin
ILOCK_EXCL
look up icur
xfs_qm_dqattach_locked
xfs_qm_dqattach_one
xfs_qm_dqget_inode
dquot cache miss
xfs_iunlock(ip, XFS_ILOCK_EXCL);
error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
xfs_ilock(ip, XFS_ILOCK_EXCL);
....
xfs_bmapi_reserve_delalloc(icur)
Yup, that's what is letting the magic smoke out -
xfs_qm_dqattach_locked() can cycle the ILOCK. If that happens, we
can pass a stale icur to xfs_bmapi_reserve_delalloc() and it all
goes downhill from there.
Back to Darrick now:
So. Fix this by moving the dqattach_locked call up before we take the
ILOCK, like all the other callers in that file.
Fixes: a526c85c2236 ("xfs: move xfs_file_iomap_begin_delay around") # goes further back than this
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
2022-11-29 01:24:43 +00:00
|
|
|
error = xfs_qm_dqattach(ip);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
2022-06-23 17:51:57 +00:00
|
|
|
error = xfs_ilock_for_iomap(ip, flags, &lockmode);
|
|
|
|
if (error)
|
|
|
|
return error;
|
2019-10-19 16:09:46 +00:00
|
|
|
|
2020-05-18 17:28:05 +00:00
|
|
|
if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) ||
|
2019-11-11 20:53:22 +00:00
|
|
|
XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
|
2024-02-22 20:31:51 +00:00
|
|
|
xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
|
2019-10-19 16:09:46 +00:00
|
|
|
error = -EFSCORRUPTED;
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
XFS_STATS_INC(mp, xs_blk_mapw);
|
|
|
|
|
2021-04-13 18:15:09 +00:00
|
|
|
error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
|
|
|
|
if (error)
|
|
|
|
goto out_unlock;
|
2019-10-19 16:09:46 +00:00
|
|
|
|
|
|
|
/*
|
2020-08-05 15:49:58 +00:00
|
|
|
* Search the data fork first to look up our source mapping. We
|
2019-10-19 16:09:46 +00:00
|
|
|
* always need the data fork map, as we have to return it to the
|
|
|
|
* iomap code so that the higher level write code can read data in to
|
|
|
|
* perform read-modify-write cycles for unaligned writes.
|
|
|
|
*/
|
|
|
|
eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
|
|
|
|
if (eof)
|
|
|
|
imap.br_startoff = end_fsb; /* fake hole until the end */
|
|
|
|
|
2023-05-01 23:14:51 +00:00
|
|
|
/* We never need to allocate blocks for zeroing or unsharing a hole. */
|
|
|
|
if ((flags & (IOMAP_UNSHARE | IOMAP_ZERO)) &&
|
|
|
|
imap.br_startoff > offset_fsb) {
|
2019-10-19 16:09:46 +00:00
|
|
|
xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
xfs: convert delayed extents to unwritten when zeroing post eof blocks
Current clone operation could be non-atomic if the destination of a file
is beyond EOF, user could get a file with corrupted (zeroed) data on
crash.
The problem is about preallocations. If you write some data into a file:
[A...B)
and XFS decides to preallocate some post-eof blocks, then it can create
a delayed allocation reservation:
[A.........D)
The writeback path tries to convert delayed extents to real ones by
allocating blocks. If there aren't enough contiguous free space, we can
end up with two extents, the first real and the second still delalloc:
[A....C)[C.D)
After that, both the in-memory and the on-disk file sizes are still B.
If we clone into the range [E...F) from another file:
[A....C)[C.D) [E...F)
then xfs_reflink_zero_posteof() calls iomap_zero_range() to zero out the
range [B, E) beyond EOF and flush it. Since [C, D) is still a delalloc
extent, its pagecache will be zeroed and both the in-memory and on-disk
size will be updated to D after flushing but before cloning. This is
wrong, because the user can see the size change and read the zeroes
while the clone operation is ongoing.
We need to keep the in-memory and on-disk size before the clone
operation starts, so instead of writing zeroes through the page cache
for delayed ranges beyond EOF, we convert these ranges to unwritten and
invalidate any cached data over that range beyond EOF.
Suggested-by: Dave Chinner <david@fromorbit.com>
Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
2024-04-25 13:13:30 +00:00
|
|
|
/*
|
|
|
|
* For zeroing, trim a delalloc extent that extends beyond the EOF
|
|
|
|
* block. If it starts beyond the EOF block, convert it to an
|
|
|
|
* unwritten extent.
|
|
|
|
*/
|
|
|
|
if ((flags & IOMAP_ZERO) && imap.br_startoff <= offset_fsb &&
|
|
|
|
isnullstartblock(imap.br_startblock)) {
|
|
|
|
xfs_fileoff_t eof_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
|
|
|
|
|
|
|
|
if (offset_fsb >= eof_fsb)
|
|
|
|
goto convert_delay;
|
|
|
|
if (end_fsb > eof_fsb) {
|
|
|
|
end_fsb = eof_fsb;
|
|
|
|
xfs_trim_extent(&imap, offset_fsb,
|
|
|
|
end_fsb - offset_fsb);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-10-19 16:09:46 +00:00
|
|
|
/*
|
|
|
|
* Search the COW fork extent list even if we did not find a data fork
|
|
|
|
* extent. This serves two purposes: first this implements the
|
|
|
|
* speculative preallocation using cowextsize, so that we also unshare
|
|
|
|
* block adjacent to shared blocks instead of just the shared blocks
|
|
|
|
* themselves. Second the lookup in the extent list is generally faster
|
|
|
|
* than going out to the shared extent tree.
|
|
|
|
*/
|
|
|
|
if (xfs_is_cow_inode(ip)) {
|
|
|
|
if (!ip->i_cowfp) {
|
|
|
|
ASSERT(!xfs_is_reflink_inode(ip));
|
|
|
|
xfs_ifork_init_cow(ip);
|
|
|
|
}
|
|
|
|
cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
|
|
|
|
&ccur, &cmap);
|
|
|
|
if (!cow_eof && cmap.br_startoff <= offset_fsb) {
|
|
|
|
trace_xfs_reflink_cow_found(ip, &cmap);
|
|
|
|
goto found_cow;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (imap.br_startoff <= offset_fsb) {
|
|
|
|
/*
|
|
|
|
* For reflink files we may need a delalloc reservation when
|
|
|
|
* overwriting shared extents. This includes zeroing of
|
|
|
|
* existing extents that contain data.
|
|
|
|
*/
|
|
|
|
if (!xfs_is_cow_inode(ip) ||
|
|
|
|
((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
|
|
|
|
trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
|
|
|
|
&imap);
|
|
|
|
goto found_imap;
|
|
|
|
}
|
|
|
|
|
|
|
|
xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
|
|
|
|
|
|
|
|
/* Trim the mapping to the nearest shared extent boundary. */
|
2020-01-20 22:34:47 +00:00
|
|
|
error = xfs_bmap_trim_cow(ip, &imap, &shared);
|
2019-10-19 16:09:46 +00:00
|
|
|
if (error)
|
|
|
|
goto out_unlock;
|
|
|
|
|
|
|
|
/* Not shared? Just report the (potentially capped) extent. */
|
|
|
|
if (!shared) {
|
|
|
|
trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
|
|
|
|
&imap);
|
|
|
|
goto found_imap;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Fork all the shared blocks from our write offset until the
|
|
|
|
* end of the extent.
|
|
|
|
*/
|
2019-10-19 16:09:47 +00:00
|
|
|
allocfork = XFS_COW_FORK;
|
2019-10-19 16:09:46 +00:00
|
|
|
end_fsb = imap.br_startoff + imap.br_blockcount;
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* We cap the maximum length we map here to MAX_WRITEBACK_PAGES
|
|
|
|
* pages to keep the chunks of work done where somewhat
|
|
|
|
* symmetric with the work writeback does. This is a completely
|
|
|
|
* arbitrary number pulled out of thin air.
|
|
|
|
*
|
|
|
|
* Note that the values needs to be less than 32-bits wide until
|
|
|
|
* the lower level functions are updated.
|
|
|
|
*/
|
|
|
|
count = min_t(loff_t, count, 1024 * PAGE_SIZE);
|
|
|
|
end_fsb = xfs_iomap_end_fsb(mp, offset, count);
|
|
|
|
|
|
|
|
if (xfs_is_always_cow_inode(ip))
|
2019-10-19 16:09:47 +00:00
|
|
|
allocfork = XFS_COW_FORK;
|
2019-10-19 16:09:46 +00:00
|
|
|
}
|
|
|
|
|
2020-05-23 16:43:30 +00:00
|
|
|
if (eof && offset + count > XFS_ISIZE(ip)) {
|
|
|
|
/*
|
|
|
|
* Determine the initial size of the preallocation.
|
|
|
|
* We clean up any extra preallocation when the file is closed.
|
|
|
|
*/
|
2021-08-19 01:46:52 +00:00
|
|
|
if (xfs_has_allocsize(mp))
|
2020-05-23 16:43:30 +00:00
|
|
|
prealloc_blocks = mp->m_allocsize_blocks;
|
2023-03-19 03:58:40 +00:00
|
|
|
else if (allocfork == XFS_DATA_FORK)
|
2020-05-23 16:43:30 +00:00
|
|
|
prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
|
|
|
|
offset, count, &icur);
|
2023-03-19 03:58:40 +00:00
|
|
|
else
|
|
|
|
prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
|
|
|
|
offset, count, &ccur);
|
2019-10-19 16:09:46 +00:00
|
|
|
if (prealloc_blocks) {
|
|
|
|
xfs_extlen_t align;
|
|
|
|
xfs_off_t end_offset;
|
|
|
|
xfs_fileoff_t p_end_fsb;
|
|
|
|
|
2019-10-28 15:41:44 +00:00
|
|
|
end_offset = XFS_ALLOC_ALIGN(mp, offset + count - 1);
|
2019-10-19 16:09:46 +00:00
|
|
|
p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
|
|
|
|
prealloc_blocks;
|
|
|
|
|
2019-10-30 19:24:58 +00:00
|
|
|
align = xfs_eof_alignment(ip);
|
2019-10-19 16:09:46 +00:00
|
|
|
if (align)
|
|
|
|
p_end_fsb = roundup_64(p_end_fsb, align);
|
|
|
|
|
|
|
|
p_end_fsb = min(p_end_fsb,
|
|
|
|
XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
|
|
|
|
ASSERT(p_end_fsb > offset_fsb);
|
|
|
|
prealloc_blocks = p_end_fsb - end_fsb;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-10-08 08:59:19 +00:00
|
|
|
/*
|
|
|
|
* Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
|
|
|
|
* them out if the write happens to fail.
|
|
|
|
*/
|
|
|
|
iomap_flags |= IOMAP_F_NEW;
|
2019-10-19 16:09:47 +00:00
|
|
|
if (allocfork == XFS_COW_FORK) {
|
xfs: restrict when we try to align cow fork delalloc to cowextsz hints
xfs/205 produces the following failure when always_cow is enabled:
--- a/tests/xfs/205.out 2024-02-28 16:20:24.437887970 -0800
+++ b/tests/xfs/205.out.bad 2024-06-03 21:13:40.584000000 -0700
@@ -1,4 +1,5 @@
QA output created by 205
*** one file
+ !!! disk full (expected)
*** one file, a few bytes at a time
*** done
This is the result of overly aggressive attempts to align cow fork
delalloc reservations to the CoW extent size hint. Looking at the trace
data, we're trying to append a single fsblock to the "fred" file.
Trying to create a speculative post-eof reservation fails because
there's not enough space.
We then set @prealloc_blocks to zero and try again, but the cowextsz
alignment code triggers, which expands our request for a 1-fsblock
reservation into a 39-block reservation. There's not enough space for
that, so the whole write fails with ENOSPC even though there's
sufficient space in the filesystem to allocate the single block that we
need to land the write.
There are two things wrong here -- first, we shouldn't be attempting
speculative preallocations beyond what was requested when we're low on
space. Second, if we've already computed a posteof preallocation, we
shouldn't bother trying to align that to the cowextsize hint.
Fix both of these problems by adding a flag that only enables the
expansion of the delalloc reservation to the cowextsize if we're doing a
non-extending write, and only if we're not doing an ENOSPC retry. This
requires us to move the ENOSPC retry logic to xfs_bmapi_reserve_delalloc.
I probably should have caught this six years ago when 6ca30729c206d was
being reviewed, but oh well. Update the comments to reflect what the
code does now.
Fixes: 6ca30729c206d ("xfs: bmap code cleanup")
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
2024-06-19 17:32:44 +00:00
|
|
|
error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
|
|
|
|
end_fsb - offset_fsb, prealloc_blocks, &cmap,
|
|
|
|
&ccur, cow_eof);
|
|
|
|
if (error)
|
|
|
|
goto out_unlock;
|
|
|
|
|
2019-10-19 16:09:47 +00:00
|
|
|
trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap);
|
2019-10-19 16:09:46 +00:00
|
|
|
goto found_cow;
|
|
|
|
}
|
|
|
|
|
xfs: restrict when we try to align cow fork delalloc to cowextsz hints
xfs/205 produces the following failure when always_cow is enabled:
--- a/tests/xfs/205.out 2024-02-28 16:20:24.437887970 -0800
+++ b/tests/xfs/205.out.bad 2024-06-03 21:13:40.584000000 -0700
@@ -1,4 +1,5 @@
QA output created by 205
*** one file
+ !!! disk full (expected)
*** one file, a few bytes at a time
*** done
This is the result of overly aggressive attempts to align cow fork
delalloc reservations to the CoW extent size hint. Looking at the trace
data, we're trying to append a single fsblock to the "fred" file.
Trying to create a speculative post-eof reservation fails because
there's not enough space.
We then set @prealloc_blocks to zero and try again, but the cowextsz
alignment code triggers, which expands our request for a 1-fsblock
reservation into a 39-block reservation. There's not enough space for
that, so the whole write fails with ENOSPC even though there's
sufficient space in the filesystem to allocate the single block that we
need to land the write.
There are two things wrong here -- first, we shouldn't be attempting
speculative preallocations beyond what was requested when we're low on
space. Second, if we've already computed a posteof preallocation, we
shouldn't bother trying to align that to the cowextsize hint.
Fix both of these problems by adding a flag that only enables the
expansion of the delalloc reservation to the cowextsize if we're doing a
non-extending write, and only if we're not doing an ENOSPC retry. This
requires us to move the ENOSPC retry logic to xfs_bmapi_reserve_delalloc.
I probably should have caught this six years ago when 6ca30729c206d was
being reviewed, but oh well. Update the comments to reflect what the
code does now.
Fixes: 6ca30729c206d ("xfs: bmap code cleanup")
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
2024-06-19 17:32:44 +00:00
|
|
|
error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
|
|
|
|
end_fsb - offset_fsb, prealloc_blocks, &imap, &icur,
|
|
|
|
eof);
|
|
|
|
if (error)
|
|
|
|
goto out_unlock;
|
|
|
|
|
2019-10-19 16:09:47 +00:00
|
|
|
trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
|
2019-10-19 16:09:46 +00:00
|
|
|
found_imap:
|
2024-10-08 08:59:19 +00:00
|
|
|
seq = xfs_iomap_inode_sequence(ip, iomap_flags);
|
2024-04-25 13:13:27 +00:00
|
|
|
xfs_iunlock(ip, lockmode);
|
2024-10-08 08:59:19 +00:00
|
|
|
return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags, seq);
|
2019-10-19 16:09:46 +00:00
|
|
|
|
xfs: convert delayed extents to unwritten when zeroing post eof blocks
Current clone operation could be non-atomic if the destination of a file
is beyond EOF, user could get a file with corrupted (zeroed) data on
crash.
The problem is about preallocations. If you write some data into a file:
[A...B)
and XFS decides to preallocate some post-eof blocks, then it can create
a delayed allocation reservation:
[A.........D)
The writeback path tries to convert delayed extents to real ones by
allocating blocks. If there aren't enough contiguous free space, we can
end up with two extents, the first real and the second still delalloc:
[A....C)[C.D)
After that, both the in-memory and the on-disk file sizes are still B.
If we clone into the range [E...F) from another file:
[A....C)[C.D) [E...F)
then xfs_reflink_zero_posteof() calls iomap_zero_range() to zero out the
range [B, E) beyond EOF and flush it. Since [C, D) is still a delalloc
extent, its pagecache will be zeroed and both the in-memory and on-disk
size will be updated to D after flushing but before cloning. This is
wrong, because the user can see the size change and read the zeroes
while the clone operation is ongoing.
We need to keep the in-memory and on-disk size before the clone
operation starts, so instead of writing zeroes through the page cache
for delayed ranges beyond EOF, we convert these ranges to unwritten and
invalidate any cached data over that range beyond EOF.
Suggested-by: Dave Chinner <david@fromorbit.com>
Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
2024-04-25 13:13:30 +00:00
|
|
|
convert_delay:
|
|
|
|
xfs_iunlock(ip, lockmode);
|
|
|
|
truncate_pagecache(inode, offset);
|
|
|
|
error = xfs_bmapi_convert_delalloc(ip, XFS_DATA_FORK, offset,
|
|
|
|
iomap, NULL);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
|
|
|
trace_xfs_iomap_alloc(ip, offset, count, XFS_DATA_FORK, &imap);
|
|
|
|
return 0;
|
|
|
|
|
2019-10-19 16:09:46 +00:00
|
|
|
found_cow:
|
|
|
|
if (imap.br_startoff <= offset_fsb) {
|
2024-10-08 08:59:20 +00:00
|
|
|
error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0,
|
|
|
|
xfs_iomap_inode_sequence(ip, 0));
|
2019-10-19 16:09:46 +00:00
|
|
|
if (error)
|
2022-11-28 22:09:17 +00:00
|
|
|
goto out_unlock;
|
2024-10-08 08:59:20 +00:00
|
|
|
} else {
|
|
|
|
xfs_trim_extent(&cmap, offset_fsb,
|
|
|
|
imap.br_startoff - offset_fsb);
|
2019-10-19 16:09:46 +00:00
|
|
|
}
|
2021-08-20 21:42:39 +00:00
|
|
|
|
2024-10-08 08:59:21 +00:00
|
|
|
iomap_flags |= IOMAP_F_SHARED;
|
2024-10-08 08:59:20 +00:00
|
|
|
seq = xfs_iomap_inode_sequence(ip, iomap_flags);
|
2024-04-25 13:13:27 +00:00
|
|
|
xfs_iunlock(ip, lockmode);
|
2024-10-08 08:59:20 +00:00
|
|
|
return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, iomap_flags, seq);
|
2019-10-19 16:09:46 +00:00
|
|
|
|
|
|
|
out_unlock:
|
2024-04-25 13:13:27 +00:00
|
|
|
xfs_iunlock(ip, lockmode);
|
2019-10-19 16:09:46 +00:00
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2024-09-10 04:39:07 +00:00
|
|
|
static void
|
2022-11-23 01:40:12 +00:00
|
|
|
xfs_buffered_write_delalloc_punch(
|
|
|
|
struct inode *inode,
|
2022-11-23 01:44:38 +00:00
|
|
|
loff_t offset,
|
2024-09-10 04:39:06 +00:00
|
|
|
loff_t length,
|
|
|
|
struct iomap *iomap)
|
2022-11-23 01:40:12 +00:00
|
|
|
{
|
2024-10-08 08:59:21 +00:00
|
|
|
xfs_bmap_punch_delalloc_range(XFS_I(inode),
|
|
|
|
(iomap->flags & IOMAP_F_SHARED) ?
|
|
|
|
XFS_COW_FORK : XFS_DATA_FORK,
|
|
|
|
offset, offset + length);
|
2022-11-23 01:40:12 +00:00
|
|
|
}
|
|
|
|
|
2016-06-20 23:53:44 +00:00
|
|
|
static int
|
2019-10-19 16:09:46 +00:00
|
|
|
xfs_buffered_write_iomap_end(
|
|
|
|
struct inode *inode,
|
2016-06-20 23:53:44 +00:00
|
|
|
loff_t offset,
|
|
|
|
loff_t length,
|
2017-03-08 17:58:08 +00:00
|
|
|
ssize_t written,
|
2019-10-19 16:09:46 +00:00
|
|
|
unsigned flags,
|
2017-03-08 17:58:08 +00:00
|
|
|
struct iomap *iomap)
|
2016-06-20 23:53:44 +00:00
|
|
|
{
|
2024-10-08 08:59:13 +00:00
|
|
|
loff_t start_byte, end_byte;
|
|
|
|
|
|
|
|
/* If we didn't reserve the blocks, we're not allowed to punch them. */
|
|
|
|
if (iomap->type != IOMAP_DELALLOC || !(iomap->flags & IOMAP_F_NEW))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Nothing to do if we've written the entire delalloc extent */
|
|
|
|
start_byte = iomap_last_written_block(inode, offset, written);
|
|
|
|
end_byte = round_up(offset + length, i_blocksize(inode));
|
|
|
|
if (start_byte >= end_byte)
|
|
|
|
return 0;
|
|
|
|
|
2024-10-08 08:59:17 +00:00
|
|
|
/* For zeroing operations the callers already hold invalidate_lock. */
|
|
|
|
if (flags & (IOMAP_UNSHARE | IOMAP_ZERO)) {
|
|
|
|
rwsem_assert_held_write(&inode->i_mapping->invalidate_lock);
|
|
|
|
iomap_write_delalloc_release(inode, start_byte, end_byte, flags,
|
|
|
|
iomap, xfs_buffered_write_delalloc_punch);
|
|
|
|
} else {
|
|
|
|
filemap_invalidate_lock(inode->i_mapping);
|
|
|
|
iomap_write_delalloc_release(inode, start_byte, end_byte, flags,
|
|
|
|
iomap, xfs_buffered_write_delalloc_punch);
|
|
|
|
filemap_invalidate_unlock(inode->i_mapping);
|
|
|
|
}
|
|
|
|
|
2016-06-20 23:53:44 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-10-19 16:09:46 +00:00
|
|
|
const struct iomap_ops xfs_buffered_write_iomap_ops = {
|
|
|
|
.iomap_begin = xfs_buffered_write_iomap_begin,
|
|
|
|
.iomap_end = xfs_buffered_write_iomap_end,
|
2016-06-20 23:53:44 +00:00
|
|
|
};
|
2016-08-16 22:45:30 +00:00
|
|
|
|
xfs: write page faults in iomap are not buffered writes
When we reserve a delalloc region in xfs_buffered_write_iomap_begin,
we mark the iomap as IOMAP_F_NEW so that the the write context
understands that it allocated the delalloc region.
If we then fail that buffered write, xfs_buffered_write_iomap_end()
checks for the IOMAP_F_NEW flag and if it is set, it punches out
the unused delalloc region that was allocated for the write.
The assumption this code makes is that all buffered write operations
that can allocate space are run under an exclusive lock (i_rwsem).
This is an invalid assumption: page faults in mmap()d regions call
through this same function pair to map the file range being faulted
and this runs only holding the inode->i_mapping->invalidate_lock in
shared mode.
IOWs, we can have races between page faults and write() calls that
fail the nested page cache write operation that result in data loss.
That is, the failing iomap_end call will punch out the data that
the other racing iomap iteration brought into the page cache. This
can be reproduced with generic/34[46] if we arbitrarily fail page
cache copy-in operations from write() syscalls.
Code analysis tells us that the iomap_page_mkwrite() function holds
the already instantiated and uptodate folio locked across the iomap
mapping iterations. Hence the folio cannot be removed from memory
whilst we are mapping the range it covers, and as such we do not
care if the mapping changes state underneath the iomap iteration
loop:
1. if the folio is not already dirty, there is no writeback races
possible.
2. if we allocated the mapping (delalloc or unwritten), the folio
cannot already be dirty. See #1.
3. If the folio is already dirty, it must be up to date. As we hold
it locked, it cannot be reclaimed from memory. Hence we always
have valid data in the page cache while iterating the mapping.
4. Valid data in the page cache can exist when the underlying
mapping is DELALLOC, UNWRITTEN or WRITTEN. Having the mapping
change from DELALLOC->UNWRITTEN or UNWRITTEN->WRITTEN does not
change the data in the page - it only affects actions if we are
initialising a new page. Hence #3 applies and we don't care
about these extent map transitions racing with
iomap_page_mkwrite().
5. iomap_page_mkwrite() checks for page invalidation races
(truncate, hole punch, etc) after it locks the folio. We also
hold the mapping->invalidation_lock here, and hence the mapping
cannot change due to extent removal operations while we are
iterating the folio.
As such, filesystems that don't use bufferheads will never fail
the iomap_folio_mkwrite_iter() operation on the current mapping,
regardless of whether the iomap should be considered stale.
Further, the range we are asked to iterate is limited to the range
inside EOF that the folio spans. Hence, for XFS, we will only map
the exact range we are asked for, and we will only do speculative
preallocation with delalloc if we are mapping a hole at the EOF
page. The iterator will consume the entire range of the folio that
is within EOF, and anything beyond the EOF block cannot be accessed.
We never need to truncate this post-EOF speculative prealloc away in
the context of the iomap_page_mkwrite() iterator because if it
remains unused we'll remove it when the last reference to the inode
goes away.
Hence we don't actually need an .iomap_end() cleanup/error handling
path at all for iomap_page_mkwrite() for XFS. This means we can
separate the page fault processing from the complexity of the
.iomap_end() processing in the buffered write path. This also means
that the buffered write path will also be able to take the
mapping->invalidate_lock as necessary.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
2022-11-06 23:09:11 +00:00
|
|
|
/*
|
|
|
|
* iomap_page_mkwrite() will never fail in a way that requires delalloc extents
|
|
|
|
* that it allocated to be revoked. Hence we do not need an .iomap_end method
|
|
|
|
* for this operation.
|
|
|
|
*/
|
|
|
|
const struct iomap_ops xfs_page_mkwrite_iomap_ops = {
|
|
|
|
.iomap_begin = xfs_buffered_write_iomap_begin,
|
|
|
|
};
|
|
|
|
|
2019-10-19 16:09:45 +00:00
|
|
|
static int
|
|
|
|
xfs_read_iomap_begin(
|
|
|
|
struct inode *inode,
|
|
|
|
loff_t offset,
|
|
|
|
loff_t length,
|
|
|
|
unsigned flags,
|
|
|
|
struct iomap *iomap,
|
|
|
|
struct iomap *srcmap)
|
|
|
|
{
|
|
|
|
struct xfs_inode *ip = XFS_I(inode);
|
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
|
|
|
struct xfs_bmbt_irec imap;
|
|
|
|
xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
|
|
|
|
xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, length);
|
|
|
|
int nimaps = 1, error = 0;
|
|
|
|
bool shared = false;
|
2022-06-23 17:51:56 +00:00
|
|
|
unsigned int lockmode = XFS_ILOCK_SHARED;
|
2022-11-28 22:09:17 +00:00
|
|
|
u64 seq;
|
2019-10-19 16:09:45 +00:00
|
|
|
|
|
|
|
ASSERT(!(flags & (IOMAP_WRITE | IOMAP_ZERO)));
|
|
|
|
|
2021-08-19 01:46:53 +00:00
|
|
|
if (xfs_is_shutdown(mp))
|
2019-10-19 16:09:45 +00:00
|
|
|
return -EIO;
|
|
|
|
|
|
|
|
error = xfs_ilock_for_iomap(ip, flags, &lockmode);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
|
|
|
|
&nimaps, 0);
|
2022-12-01 15:28:54 +00:00
|
|
|
if (!error && ((flags & IOMAP_REPORT) || IS_DAX(inode)))
|
2019-10-19 16:09:45 +00:00
|
|
|
error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
|
2022-11-28 22:09:17 +00:00
|
|
|
seq = xfs_iomap_inode_sequence(ip, shared ? IOMAP_F_SHARED : 0);
|
2019-10-19 16:09:45 +00:00
|
|
|
xfs_iunlock(ip, lockmode);
|
|
|
|
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
|
2021-11-29 10:21:57 +00:00
|
|
|
return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
|
2022-11-28 22:09:17 +00:00
|
|
|
shared ? IOMAP_F_SHARED : 0, seq);
|
2019-10-19 16:09:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
const struct iomap_ops xfs_read_iomap_ops = {
|
|
|
|
.iomap_begin = xfs_read_iomap_begin,
|
|
|
|
};
|
|
|
|
|
2019-02-18 17:38:46 +00:00
|
|
|
/*
 * iomap_begin for seek-style lookups: report the mapping that covers
 * @offset as exactly one of a data extent, a COW fork extent (reported
 * as unwritten), or a hole.  Whatever is reported is trimmed so it never
 * extends past the start of the next mapping of a different kind, which
 * lets the caller walk mappings one category boundary at a time.
 *
 * Returns 0 and fills out @iomap on success, or a negative errno.
 */
static int
xfs_seek_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	/* Start offsets of the next data / COW extents; NULLFILEOFF = none found. */
	xfs_fileoff_t		cow_fsb = NULLFILEOFF, data_fsb = NULLFILEOFF;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	imap, cmap;
	int			error = 0;
	unsigned		lockmode;
	u64			seq;

	if (xfs_is_shutdown(mp))
		return -EIO;

	/* Hold the data-map ILOCK shared; extents must be in memory to iterate. */
	lockmode = xfs_ilock_data_map_shared(ip);
	error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
	if (error)
		goto out_unlock;

	if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) {
		/*
		 * If we found a data extent we are done.
		 */
		if (imap.br_startoff <= offset_fsb)
			goto done;
		/* Extent starts beyond offset: remember where the hole ends. */
		data_fsb = imap.br_startoff;
	} else {
		/*
		 * Fake a hole until the end of the file.
		 */
		data_fsb = xfs_iomap_end_fsb(mp, offset, length);
	}

	/*
	 * If a COW fork extent covers the hole, report it - capped to the next
	 * data fork extent:
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap))
		cow_fsb = cmap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		if (data_fsb < cow_fsb + cmap.br_blockcount)
			end_fsb = min(end_fsb, data_fsb);
		xfs_trim_extent(&cmap, offset_fsb, end_fsb - offset_fsb);
		/* Sample the mapping sequence while still holding the ILOCK. */
		seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
		error = xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
				IOMAP_F_SHARED, seq);
		/*
		 * This is a COW extent, so we must probe the page cache
		 * because there could be dirty page cache being backed
		 * by this extent.
		 */
		iomap->type = IOMAP_UNWRITTEN;
		goto out_unlock;
	}

	/*
	 * Else report a hole, capped to the next found data or COW extent.
	 */
	if (cow_fsb != NULLFILEOFF && cow_fsb < data_fsb)
		imap.br_blockcount = cow_fsb - offset_fsb;
	else
		imap.br_blockcount = data_fsb - offset_fsb;
	imap.br_startoff = offset_fsb;
	imap.br_startblock = HOLESTARTBLOCK;
	imap.br_state = XFS_EXT_NORM;
done:
	seq = xfs_iomap_inode_sequence(ip, 0);
	/* Never report past the requested range. */
	xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
	error = xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0, seq);
out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}
|
|
|
|
|
|
|
|
/* iomap ops backed by xfs_seek_iomap_begin(): data/COW/hole reporting. */
const struct iomap_ops xfs_seek_iomap_ops = {
	.iomap_begin		= xfs_seek_iomap_begin,
};
|
|
|
|
|
2016-08-16 22:45:30 +00:00
|
|
|
static int
|
|
|
|
xfs_xattr_iomap_begin(
|
|
|
|
struct inode *inode,
|
|
|
|
loff_t offset,
|
|
|
|
loff_t length,
|
|
|
|
unsigned flags,
|
2019-10-18 23:44:10 +00:00
|
|
|
struct iomap *iomap,
|
|
|
|
struct iomap *srcmap)
|
2016-08-16 22:45:30 +00:00
|
|
|
{
|
|
|
|
struct xfs_inode *ip = XFS_I(inode);
|
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
|
|
|
xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
|
|
|
|
xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + length);
|
|
|
|
struct xfs_bmbt_irec imap;
|
|
|
|
int nimaps = 1, error = 0;
|
|
|
|
unsigned lockmode;
|
2022-11-28 22:09:17 +00:00
|
|
|
int seq;
|
2016-08-16 22:45:30 +00:00
|
|
|
|
2021-08-19 01:46:53 +00:00
|
|
|
if (xfs_is_shutdown(mp))
|
2016-08-16 22:45:30 +00:00
|
|
|
return -EIO;
|
|
|
|
|
2017-04-06 23:00:39 +00:00
|
|
|
lockmode = xfs_ilock_attr_map_shared(ip);
|
2016-08-16 22:45:30 +00:00
|
|
|
|
|
|
|
/* if there are no attribute fork or extents, return ENOENT */
|
2022-07-09 17:56:06 +00:00
|
|
|
if (!xfs_inode_has_attr_fork(ip) || !ip->i_af.if_nextents) {
|
2016-08-16 22:45:30 +00:00
|
|
|
error = -ENOENT;
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
xfs: make inode attribute forks a permanent part of struct xfs_inode
Syzkaller reported a UAF bug a while back:
==================================================================
BUG: KASAN: use-after-free in xfs_ilock_attr_map_shared+0xe3/0xf6 fs/xfs/xfs_inode.c:127
Read of size 4 at addr ffff88802cec919c by task syz-executor262/2958
CPU: 2 PID: 2958 Comm: syz-executor262 Not tainted
5.15.0-0.30.3-20220406_1406 #3
Hardware name: Red Hat KVM, BIOS 1.13.0-2.module+el8.3.0+7860+a7792d29
04/01/2014
Call Trace:
<TASK>
__dump_stack lib/dump_stack.c:88 [inline]
dump_stack_lvl+0x82/0xa9 lib/dump_stack.c:106
print_address_description.constprop.9+0x21/0x2d5 mm/kasan/report.c:256
__kasan_report mm/kasan/report.c:442 [inline]
kasan_report.cold.14+0x7f/0x11b mm/kasan/report.c:459
xfs_ilock_attr_map_shared+0xe3/0xf6 fs/xfs/xfs_inode.c:127
xfs_attr_get+0x378/0x4c2 fs/xfs/libxfs/xfs_attr.c:159
xfs_xattr_get+0xe3/0x150 fs/xfs/xfs_xattr.c:36
__vfs_getxattr+0xdf/0x13d fs/xattr.c:399
cap_inode_need_killpriv+0x41/0x5d security/commoncap.c:300
security_inode_need_killpriv+0x4c/0x97 security/security.c:1408
dentry_needs_remove_privs.part.28+0x21/0x63 fs/inode.c:1912
dentry_needs_remove_privs+0x80/0x9e fs/inode.c:1908
do_truncate+0xc3/0x1e0 fs/open.c:56
handle_truncate fs/namei.c:3084 [inline]
do_open fs/namei.c:3432 [inline]
path_openat+0x30ab/0x396d fs/namei.c:3561
do_filp_open+0x1c4/0x290 fs/namei.c:3588
do_sys_openat2+0x60d/0x98c fs/open.c:1212
do_sys_open+0xcf/0x13c fs/open.c:1228
do_syscall_x64 arch/x86/entry/common.c:50 [inline]
do_syscall_64+0x3a/0x7e arch/x86/entry/common.c:80
entry_SYSCALL_64_after_hwframe+0x44/0x0
RIP: 0033:0x7f7ef4bb753d
Code: 00 c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48 89 f7 48
89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73
01 c3 48 8b 0d 1b 79 2c 00 f7 d8 64 89 01 48
RSP: 002b:00007f7ef52c2ed8 EFLAGS: 00000246 ORIG_RAX: 0000000000000055
RAX: ffffffffffffffda RBX: 0000000000404148 RCX: 00007f7ef4bb753d
RDX: 00007f7ef4bb753d RSI: 0000000000000000 RDI: 0000000020004fc0
RBP: 0000000000404140 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 0030656c69662f2e
R13: 00007ffd794db37f R14: 00007ffd794db470 R15: 00007f7ef52c2fc0
</TASK>
Allocated by task 2953:
kasan_save_stack+0x19/0x38 mm/kasan/common.c:38
kasan_set_track mm/kasan/common.c:46 [inline]
set_alloc_info mm/kasan/common.c:434 [inline]
__kasan_slab_alloc+0x68/0x7c mm/kasan/common.c:467
kasan_slab_alloc include/linux/kasan.h:254 [inline]
slab_post_alloc_hook mm/slab.h:519 [inline]
slab_alloc_node mm/slub.c:3213 [inline]
slab_alloc mm/slub.c:3221 [inline]
kmem_cache_alloc+0x11b/0x3eb mm/slub.c:3226
kmem_cache_zalloc include/linux/slab.h:711 [inline]
xfs_ifork_alloc+0x25/0xa2 fs/xfs/libxfs/xfs_inode_fork.c:287
xfs_bmap_add_attrfork+0x3f2/0x9b1 fs/xfs/libxfs/xfs_bmap.c:1098
xfs_attr_set+0xe38/0x12a7 fs/xfs/libxfs/xfs_attr.c:746
xfs_xattr_set+0xeb/0x1a9 fs/xfs/xfs_xattr.c:59
__vfs_setxattr+0x11b/0x177 fs/xattr.c:180
__vfs_setxattr_noperm+0x128/0x5e0 fs/xattr.c:214
__vfs_setxattr_locked+0x1d4/0x258 fs/xattr.c:275
vfs_setxattr+0x154/0x33d fs/xattr.c:301
setxattr+0x216/0x29f fs/xattr.c:575
__do_sys_fsetxattr fs/xattr.c:632 [inline]
__se_sys_fsetxattr fs/xattr.c:621 [inline]
__x64_sys_fsetxattr+0x243/0x2fe fs/xattr.c:621
do_syscall_x64 arch/x86/entry/common.c:50 [inline]
do_syscall_64+0x3a/0x7e arch/x86/entry/common.c:80
entry_SYSCALL_64_after_hwframe+0x44/0x0
Freed by task 2949:
kasan_save_stack+0x19/0x38 mm/kasan/common.c:38
kasan_set_track+0x1c/0x21 mm/kasan/common.c:46
kasan_set_free_info+0x20/0x30 mm/kasan/generic.c:360
____kasan_slab_free mm/kasan/common.c:366 [inline]
____kasan_slab_free mm/kasan/common.c:328 [inline]
__kasan_slab_free+0xe2/0x10e mm/kasan/common.c:374
kasan_slab_free include/linux/kasan.h:230 [inline]
slab_free_hook mm/slub.c:1700 [inline]
slab_free_freelist_hook mm/slub.c:1726 [inline]
slab_free mm/slub.c:3492 [inline]
kmem_cache_free+0xdc/0x3ce mm/slub.c:3508
xfs_attr_fork_remove+0x8d/0x132 fs/xfs/libxfs/xfs_attr_leaf.c:773
xfs_attr_sf_removename+0x5dd/0x6cb fs/xfs/libxfs/xfs_attr_leaf.c:822
xfs_attr_remove_iter+0x68c/0x805 fs/xfs/libxfs/xfs_attr.c:1413
xfs_attr_remove_args+0xb1/0x10d fs/xfs/libxfs/xfs_attr.c:684
xfs_attr_set+0xf1e/0x12a7 fs/xfs/libxfs/xfs_attr.c:802
xfs_xattr_set+0xeb/0x1a9 fs/xfs/xfs_xattr.c:59
__vfs_removexattr+0x106/0x16a fs/xattr.c:468
cap_inode_killpriv+0x24/0x47 security/commoncap.c:324
security_inode_killpriv+0x54/0xa1 security/security.c:1414
setattr_prepare+0x1a6/0x897 fs/attr.c:146
xfs_vn_change_ok+0x111/0x15e fs/xfs/xfs_iops.c:682
xfs_vn_setattr_size+0x5f/0x15a fs/xfs/xfs_iops.c:1065
xfs_vn_setattr+0x125/0x2ad fs/xfs/xfs_iops.c:1093
notify_change+0xae5/0x10a1 fs/attr.c:410
do_truncate+0x134/0x1e0 fs/open.c:64
handle_truncate fs/namei.c:3084 [inline]
do_open fs/namei.c:3432 [inline]
path_openat+0x30ab/0x396d fs/namei.c:3561
do_filp_open+0x1c4/0x290 fs/namei.c:3588
do_sys_openat2+0x60d/0x98c fs/open.c:1212
do_sys_open+0xcf/0x13c fs/open.c:1228
do_syscall_x64 arch/x86/entry/common.c:50 [inline]
do_syscall_64+0x3a/0x7e arch/x86/entry/common.c:80
entry_SYSCALL_64_after_hwframe+0x44/0x0
The buggy address belongs to the object at ffff88802cec9188
which belongs to the cache xfs_ifork of size 40
The buggy address is located 20 bytes inside of
40-byte region [ffff88802cec9188, ffff88802cec91b0)
The buggy address belongs to the page:
page:00000000c3af36a1 refcount:1 mapcount:0 mapping:0000000000000000
index:0x0 pfn:0x2cec9
flags: 0xfffffc0000200(slab|node=0|zone=1|lastcpupid=0x1fffff)
raw: 000fffffc0000200 ffffea00009d2580 0000000600000006 ffff88801a9ffc80
raw: 0000000000000000 0000000080490049 00000001ffffffff 0000000000000000
page dumped because: kasan: bad access detected
Memory state around the buggy address:
ffff88802cec9080: fb fb fb fc fc fa fb fb fb fb fc fc fb fb fb fb
ffff88802cec9100: fb fc fc fb fb fb fb fb fc fc fb fb fb fb fb fc
>ffff88802cec9180: fc fa fb fb fb fb fc fc fa fb fb fb fb fc fc fb
^
ffff88802cec9200: fb fb fb fb fc fc fb fb fb fb fb fc fc fb fb fb
ffff88802cec9280: fb fb fc fc fa fb fb fb fb fc fc fa fb fb fb fb
==================================================================
The root cause of this bug is the unlocked access to xfs_inode.i_afp
from the getxattr code paths while trying to determine which ILOCK mode
to use to stabilize the xattr data. Unfortunately, the VFS does not
acquire i_rwsem when vfs_getxattr (or listxattr) call into the
filesystem, which means that getxattr can race with a removexattr that's
tearing down the attr fork and crash:
xfs_attr_set: xfs_attr_get:
xfs_attr_fork_remove: xfs_ilock_attr_map_shared:
xfs_idestroy_fork(ip->i_afp);
kmem_cache_free(xfs_ifork_cache, ip->i_afp);
if (ip->i_afp &&
ip->i_afp = NULL;
xfs_need_iread_extents(ip->i_afp))
<KABOOM>
ip->i_forkoff = 0;
Regrettably, the VFS is much more lax about i_rwsem and getxattr than
is immediately obvious -- not only does it not guarantee that we hold
i_rwsem, it actually doesn't guarantee that we *don't* hold it either.
The getxattr system call won't acquire the lock before calling XFS, but
the file capabilities code calls getxattr with and without i_rwsem held
to determine if the "security.capabilities" xattr is set on the file.
Fixing the VFS locking requires a treewide investigation into every code
path that could touch an xattr and what i_rwsem state it expects or sets
up. That could take years or even prove impossible; fortunately, we
can fix this UAF problem inside XFS.
An earlier version of this patch used smp_wmb in xfs_attr_fork_remove to
ensure that i_forkoff is always zeroed before i_afp is set to null and
changed the read paths to use smp_rmb before accessing i_forkoff and
i_afp, which avoided these UAF problems. However, the patch author was
too busy dealing with other problems in the meantime, and by the time he
came back to this issue, the situation had changed a bit.
On a modern system with selinux, each inode will always have at least
one xattr for the selinux label, so it doesn't make much sense to keep
incurring the extra pointer dereference. Furthermore, Allison's
upcoming parent pointer patchset will also cause nearly every inode in
the filesystem to have extended attributes. Therefore, make the inode
attribute fork structure part of struct xfs_inode, at a cost of 40 more
bytes.
This patch adds a clunky if_present field where necessary to maintain
the existing logic of xattr fork null pointer testing in the existing
codebase. The next patch switches the logic over to XFS_IFORK_Q and it
all goes away.
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
2022-07-09 17:56:06 +00:00
|
|
|
ASSERT(ip->i_af.if_format != XFS_DINODE_FMT_LOCAL);
|
2016-08-16 22:45:30 +00:00
|
|
|
error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
|
2017-12-07 00:13:35 +00:00
|
|
|
&nimaps, XFS_BMAPI_ATTRFORK);
|
2016-08-16 22:45:30 +00:00
|
|
|
out_unlock:
|
2022-11-28 22:09:17 +00:00
|
|
|
|
|
|
|
seq = xfs_iomap_inode_sequence(ip, IOMAP_F_XATTR);
|
2016-08-16 22:45:30 +00:00
|
|
|
xfs_iunlock(ip, lockmode);
|
|
|
|
|
2019-02-18 17:38:46 +00:00
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
ASSERT(nimaps);
|
2022-11-28 22:09:17 +00:00
|
|
|
return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_XATTR, seq);
|
2016-08-16 22:45:30 +00:00
|
|
|
}
|
|
|
|
|
2017-01-28 07:20:26 +00:00
|
|
|
/* iomap ops backed by xfs_xattr_iomap_begin(): attribute fork extents. */
const struct iomap_ops xfs_xattr_iomap_ops = {
	.iomap_begin		= xfs_xattr_iomap_begin,
};
|
2021-11-29 10:21:49 +00:00
|
|
|
|
|
|
|
int
|
|
|
|
xfs_zero_range(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
loff_t pos,
|
|
|
|
loff_t len,
|
|
|
|
bool *did_zero)
|
|
|
|
{
|
|
|
|
struct inode *inode = VFS_I(ip);
|
|
|
|
|
2024-10-08 08:59:16 +00:00
|
|
|
xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
|
|
|
|
|
2021-11-29 10:21:52 +00:00
|
|
|
if (IS_DAX(inode))
|
|
|
|
return dax_zero_range(inode, pos, len, did_zero,
|
2022-12-01 15:32:10 +00:00
|
|
|
&xfs_dax_write_iomap_ops);
|
2021-11-29 10:21:49 +00:00
|
|
|
return iomap_zero_range(inode, pos, len, did_zero,
|
|
|
|
&xfs_buffered_write_iomap_ops);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
xfs_truncate_page(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
loff_t pos,
|
|
|
|
bool *did_zero)
|
|
|
|
{
|
|
|
|
struct inode *inode = VFS_I(ip);
|
|
|
|
|
2021-11-29 10:21:52 +00:00
|
|
|
if (IS_DAX(inode))
|
|
|
|
return dax_truncate_page(inode, pos, did_zero,
|
2022-12-01 15:32:10 +00:00
|
|
|
&xfs_dax_write_iomap_ops);
|
2021-11-29 10:21:49 +00:00
|
|
|
return iomap_truncate_page(inode, pos, did_zero,
|
|
|
|
&xfs_buffered_write_iomap_ops);
|
|
|
|
}
|