2018-06-06 02:42:14 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2013-08-12 10:49:33 +00:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2000-2006 Silicon Graphics, Inc.
|
|
|
|
* All Rights Reserved.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "xfs.h"
|
|
|
|
#include "xfs_fs.h"
|
2019-06-29 02:25:35 +00:00
|
|
|
#include "xfs_shared.h"
|
2013-08-12 10:49:33 +00:00
|
|
|
#include "xfs_format.h"
|
2013-10-22 23:50:10 +00:00
|
|
|
#include "xfs_log_format.h"
|
|
|
|
#include "xfs_trans_resv.h"
|
2013-08-12 10:49:33 +00:00
|
|
|
#include "xfs_mount.h"
|
|
|
|
#include "xfs_inode.h"
|
2013-10-22 23:50:10 +00:00
|
|
|
#include "xfs_trans.h"
|
2013-08-12 10:49:33 +00:00
|
|
|
#include "xfs_inode_item.h"
|
2017-02-02 23:13:59 +00:00
|
|
|
#include "xfs_btree.h"
|
2013-10-22 23:51:50 +00:00
|
|
|
#include "xfs_bmap_btree.h"
|
2013-08-12 10:49:33 +00:00
|
|
|
#include "xfs_bmap.h"
|
|
|
|
#include "xfs_error.h"
|
|
|
|
#include "xfs_trace.h"
|
2016-02-08 04:00:01 +00:00
|
|
|
#include "xfs_da_format.h"
|
2017-03-15 07:24:25 +00:00
|
|
|
#include "xfs_da_btree.h"
|
|
|
|
#include "xfs_dir2_priv.h"
|
2018-01-08 18:51:06 +00:00
|
|
|
#include "xfs_attr_leaf.h"
|
2021-01-23 00:48:10 +00:00
|
|
|
#include "xfs_types.h"
|
2021-01-23 00:48:15 +00:00
|
|
|
#include "xfs_errortag.h"
|
2013-08-12 10:49:33 +00:00
|
|
|
|
2021-10-12 18:09:23 +00:00
|
|
|
struct kmem_cache *xfs_ifork_cache;
|
2013-08-12 10:49:33 +00:00
|
|
|
|
2016-04-05 21:41:43 +00:00
|
|
|
void
|
|
|
|
xfs_init_local_fork(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
int whichfork,
|
|
|
|
const void *data,
|
xfs: fix inode fork extent count overflow
[commit message is verbose for discussion purposes - will trim it
down later. Some questions about implementation details at the end.]
Zorro Lang recently ran a new test to stress single inode extent
counts now that they are no longer limited by memory allocation.
The test was simply:
# xfs_io -f -c "falloc 0 40t" /mnt/scratch/big-file
# ~/src/xfstests-dev/punch-alternating /mnt/scratch/big-file
This test uncovered a problem where the hole punching operation
appeared to finish with no error, but apparently only created 268M
extents instead of the 10 billion it was supposed to.
Further, trying to punch out extents that should have been present
resulted in success, but no change in the extent count. It looked
like a silent failure.
While running the test and observing the behaviour in real time,
I observed the extent coutn growing at ~2M extents/minute, and saw
this after about an hour:
# xfs_io -f -c "stat" /mnt/scratch/big-file |grep next ; \
> sleep 60 ; \
> xfs_io -f -c "stat" /mnt/scratch/big-file |grep next
fsxattr.nextents = 127657993
fsxattr.nextents = 129683339
#
And a few minutes later this:
# xfs_io -f -c "stat" /mnt/scratch/big-file |grep next
fsxattr.nextents = 4177861124
#
Ah, what? Where did that 4 billion extra extents suddenly come from?
Stop the workload, unmount, mount:
# xfs_io -f -c "stat" /mnt/scratch/big-file |grep next
fsxattr.nextents = 166044375
#
And it's back at the expected number. i.e. the extent count is
correct on disk, but it's screwed up in memory. I loaded up the
extent list, and immediately:
# xfs_io -f -c "stat" /mnt/scratch/big-file |grep next
fsxattr.nextents = 4192576215
#
It's bad again. So, where does that number come from?
xfs_fill_fsxattr():
if (ip->i_df.if_flags & XFS_IFEXTENTS)
fa->fsx_nextents = xfs_iext_count(&ip->i_df);
else
fa->fsx_nextents = ip->i_d.di_nextents;
And that's the behaviour I just saw in a nutshell. The on disk count
is correct, but once the tree is loaded into memory, it goes whacky.
Clearly there's something wrong with xfs_iext_count():
inline xfs_extnum_t xfs_iext_count(struct xfs_ifork *ifp)
{
return ifp->if_bytes / sizeof(struct xfs_iext_rec);
}
Simple enough, but 134M extents is 2**27, and that's right about
where things went wrong. A struct xfs_iext_rec is 16 bytes in size,
which means 2**27 * 2**4 = 2**31 and we're right on target for an
integer overflow. And, sure enough:
struct xfs_ifork {
int if_bytes; /* bytes in if_u1 */
....
Once we get 2**27 extents in a file, we overflow if_bytes and the
in-core extent count goes wrong. And when we reach 2**28 extents,
if_bytes wraps back to zero and things really start to go wrong
there. This is where the silent failure comes from - only the first
2**28 extents can be looked up directly due to the overflow, all the
extents above this index wrap back to somewhere in the first 2**28
extents. Hence with a regular pattern, trying to punch a hole in the
range that didn't have holes mapped to a hole in the first 2**28
extents and so "succeeded" without changing anything. Hence "silent
failure"...
Fix this by converting if_bytes to a int64_t and converting all the
index variables and size calculations to use int64_t types to avoid
overflows in future. Signed integers are still used to enable easy
detection of extent count underflows. This enables scalability of
extent counts to the limits of the on-disk format - MAXEXTNUM
(2**31) extents.
Current testing is at over 500M extents and still going:
fsxattr.nextents = 517310478
Reported-by: Zorro Lang <zlang@redhat.com>
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
2019-10-17 20:40:33 +00:00
|
|
|
int64_t size)
|
2016-04-05 21:41:43 +00:00
|
|
|
{
|
|
|
|
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
|
2016-04-05 21:53:29 +00:00
|
|
|
int mem_size = size, real_size = 0;
|
|
|
|
bool zero_terminate;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we are using the local fork to store a symlink body we need to
|
|
|
|
* zero-terminate it so that we can pass it back to the VFS directly.
|
|
|
|
* Overallocate the in-memory fork by one for that and add a zero
|
|
|
|
* to terminate it below.
|
|
|
|
*/
|
|
|
|
zero_terminate = S_ISLNK(VFS_I(ip)->i_mode);
|
|
|
|
if (zero_terminate)
|
|
|
|
mem_size++;
|
2016-04-05 21:41:43 +00:00
|
|
|
|
2017-11-03 17:34:45 +00:00
|
|
|
if (size) {
|
2016-04-05 21:53:29 +00:00
|
|
|
real_size = roundup(mem_size, 4);
|
2019-08-26 19:06:22 +00:00
|
|
|
ifp->if_u1.if_data = kmem_alloc(real_size, KM_NOFS);
|
2016-04-05 21:41:43 +00:00
|
|
|
memcpy(ifp->if_u1.if_data, data, size);
|
2016-04-05 21:53:29 +00:00
|
|
|
if (zero_terminate)
|
|
|
|
ifp->if_u1.if_data[size] = '\0';
|
2017-11-03 17:34:45 +00:00
|
|
|
} else {
|
|
|
|
ifp->if_u1.if_data = NULL;
|
2016-04-05 21:53:29 +00:00
|
|
|
}
|
2016-04-05 21:41:43 +00:00
|
|
|
|
|
|
|
ifp->if_bytes = size;
|
|
|
|
}
|
|
|
|
|
2013-08-12 10:49:33 +00:00
|
|
|
/*
|
|
|
|
* The file is in-lined in the on-disk inode.
|
|
|
|
*/
|
|
|
|
STATIC int
|
|
|
|
xfs_iformat_local(
|
2021-10-11 23:11:21 +00:00
|
|
|
struct xfs_inode *ip,
|
|
|
|
struct xfs_dinode *dip,
|
|
|
|
int whichfork,
|
|
|
|
int size)
|
2013-08-12 10:49:33 +00:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
* If the size is unreasonable, then something
|
|
|
|
* is wrong and we just bail out rather than crash in
|
|
|
|
* kmem_alloc() or memcpy() below.
|
|
|
|
*/
|
|
|
|
if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
|
|
|
|
xfs_warn(ip->i_mount,
|
2020-03-18 15:15:10 +00:00
|
|
|
"corrupt inode %Lu (bad size %d for local fork, size = %zd).",
|
2013-08-12 10:49:33 +00:00
|
|
|
(unsigned long long) ip->i_ino, size,
|
|
|
|
XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
|
2018-03-23 17:06:52 +00:00
|
|
|
xfs_inode_verifier_error(ip, -EFSCORRUPTED,
|
|
|
|
"xfs_iformat_local", dip, sizeof(*dip),
|
|
|
|
__this_address);
|
2014-06-25 04:58:08 +00:00
|
|
|
return -EFSCORRUPTED;
|
2013-08-12 10:49:33 +00:00
|
|
|
}
|
2016-04-05 21:41:43 +00:00
|
|
|
|
|
|
|
xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size);
|
2013-08-12 10:49:33 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2017-04-20 16:42:48 +00:00
|
|
|
* The file consists of a set of extents all of which fit into the on-disk
|
2017-11-03 17:34:45 +00:00
|
|
|
* inode.
|
2013-08-12 10:49:33 +00:00
|
|
|
*/
|
|
|
|
STATIC int
|
|
|
|
xfs_iformat_extents(
|
2017-04-20 16:42:48 +00:00
|
|
|
struct xfs_inode *ip,
|
|
|
|
struct xfs_dinode *dip,
|
|
|
|
int whichfork)
|
2013-08-12 10:49:33 +00:00
|
|
|
{
|
2017-04-20 16:42:48 +00:00
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
|
|
|
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
|
2017-10-19 18:06:29 +00:00
|
|
|
int state = xfs_bmap_fork_to_state(whichfork);
|
2017-04-20 16:42:48 +00:00
|
|
|
int nex = XFS_DFORK_NEXTENTS(dip, whichfork);
|
|
|
|
int size = nex * sizeof(xfs_bmbt_rec_t);
|
2017-11-03 17:34:43 +00:00
|
|
|
struct xfs_iext_cursor icur;
|
2017-04-20 16:42:48 +00:00
|
|
|
struct xfs_bmbt_rec *dp;
|
2017-11-03 17:34:46 +00:00
|
|
|
struct xfs_bmbt_irec new;
|
2017-04-20 16:42:48 +00:00
|
|
|
int i;
|
2013-08-12 10:49:33 +00:00
|
|
|
|
|
|
|
/*
|
2017-04-20 16:42:48 +00:00
|
|
|
* If the number of extents is unreasonable, then something is wrong and
|
|
|
|
* we just bail out rather than crash in kmem_alloc() or memcpy() below.
|
2013-08-12 10:49:33 +00:00
|
|
|
*/
|
2017-04-20 16:42:48 +00:00
|
|
|
if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, mp, whichfork))) {
|
2013-08-12 10:49:33 +00:00
|
|
|
xfs_warn(ip->i_mount, "corrupt inode %Lu ((a)extents = %d).",
|
|
|
|
(unsigned long long) ip->i_ino, nex);
|
2018-03-23 17:06:52 +00:00
|
|
|
xfs_inode_verifier_error(ip, -EFSCORRUPTED,
|
|
|
|
"xfs_iformat_extents(1)", dip, sizeof(*dip),
|
|
|
|
__this_address);
|
2014-06-25 04:58:08 +00:00
|
|
|
return -EFSCORRUPTED;
|
2013-08-12 10:49:33 +00:00
|
|
|
}
|
|
|
|
|
2017-11-03 17:34:46 +00:00
|
|
|
ifp->if_bytes = 0;
|
|
|
|
ifp->if_u1.if_root = NULL;
|
|
|
|
ifp->if_height = 0;
|
2013-08-12 10:49:33 +00:00
|
|
|
if (size) {
|
|
|
|
dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
|
2017-11-03 17:34:43 +00:00
|
|
|
|
|
|
|
xfs_iext_first(ifp, &icur);
|
2013-08-12 10:49:33 +00:00
|
|
|
for (i = 0; i < nex; i++, dp++) {
|
2018-03-23 17:06:52 +00:00
|
|
|
xfs_failaddr_t fa;
|
|
|
|
|
2017-11-03 17:34:47 +00:00
|
|
|
xfs_bmbt_disk_get_all(dp, &new);
|
2018-03-23 17:06:52 +00:00
|
|
|
fa = xfs_bmap_validate_extent(ip, whichfork, &new);
|
|
|
|
if (fa) {
|
|
|
|
xfs_inode_verifier_error(ip, -EFSCORRUPTED,
|
|
|
|
"xfs_iformat_extents(2)",
|
|
|
|
dp, sizeof(*dp), fa);
|
2017-04-20 16:42:48 +00:00
|
|
|
return -EFSCORRUPTED;
|
|
|
|
}
|
2017-11-03 17:34:43 +00:00
|
|
|
|
2017-11-03 17:34:46 +00:00
|
|
|
xfs_iext_insert(ip, &icur, &new, state);
|
2017-11-03 17:34:43 +00:00
|
|
|
trace_xfs_read_extent(ip, &icur, state, _THIS_IP_);
|
|
|
|
xfs_iext_next(ifp, &icur);
|
2013-08-12 10:49:33 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The file has too many extents to fit into
|
|
|
|
* the inode, so they are in B-tree format.
|
|
|
|
* Allocate a buffer for the root of the B-tree
|
|
|
|
* and copy the root into it. The i_extents
|
|
|
|
* field will remain NULL until all of the
|
|
|
|
* extents are read in (when they are needed).
|
|
|
|
*/
|
|
|
|
STATIC int
|
|
|
|
xfs_iformat_btree(
|
2021-10-11 23:11:21 +00:00
|
|
|
struct xfs_inode *ip,
|
|
|
|
struct xfs_dinode *dip,
|
2013-08-12 10:49:33 +00:00
|
|
|
int whichfork)
|
|
|
|
{
|
|
|
|
struct xfs_mount *mp = ip->i_mount;
|
|
|
|
xfs_bmdr_block_t *dfp;
|
2018-07-17 23:51:50 +00:00
|
|
|
struct xfs_ifork *ifp;
|
2013-08-12 10:49:33 +00:00
|
|
|
/* REFERENCED */
|
|
|
|
int nrecs;
|
|
|
|
int size;
|
2017-02-02 23:13:59 +00:00
|
|
|
int level;
|
2013-08-12 10:49:33 +00:00
|
|
|
|
|
|
|
ifp = XFS_IFORK_PTR(ip, whichfork);
|
|
|
|
dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
|
|
|
|
size = XFS_BMAP_BROOT_SPACE(mp, dfp);
|
|
|
|
nrecs = be16_to_cpu(dfp->bb_numrecs);
|
2017-02-02 23:13:59 +00:00
|
|
|
level = be16_to_cpu(dfp->bb_level);
|
2013-08-12 10:49:33 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* blow out if -- fork has less extents than can fit in
|
|
|
|
* fork (fork shouldn't be a btree format), root btree
|
|
|
|
* block has more records than can fit into the fork,
|
|
|
|
* or the number of extents is greater than the number of
|
|
|
|
* blocks.
|
|
|
|
*/
|
2020-05-18 17:27:22 +00:00
|
|
|
if (unlikely(ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork) ||
|
2018-01-17 02:54:13 +00:00
|
|
|
nrecs == 0 ||
|
2013-08-12 10:49:33 +00:00
|
|
|
XFS_BMDR_SPACE_CALC(nrecs) >
|
|
|
|
XFS_DFORK_SIZE(dip, mp, whichfork) ||
|
2021-03-29 18:11:40 +00:00
|
|
|
ifp->if_nextents > ip->i_nblocks) ||
|
2021-03-22 16:51:54 +00:00
|
|
|
level == 0 || level > XFS_BM_MAXLEVELS(mp, whichfork)) {
|
2013-08-12 10:49:33 +00:00
|
|
|
xfs_warn(mp, "corrupt inode %Lu (btree).",
|
|
|
|
(unsigned long long) ip->i_ino);
|
2018-03-23 17:06:52 +00:00
|
|
|
xfs_inode_verifier_error(ip, -EFSCORRUPTED,
|
|
|
|
"xfs_iformat_btree", dfp, size,
|
|
|
|
__this_address);
|
2014-06-25 04:58:08 +00:00
|
|
|
return -EFSCORRUPTED;
|
2013-08-12 10:49:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ifp->if_broot_bytes = size;
|
2019-08-26 19:06:22 +00:00
|
|
|
ifp->if_broot = kmem_alloc(size, KM_NOFS);
|
2013-08-12 10:49:33 +00:00
|
|
|
ASSERT(ifp->if_broot != NULL);
|
|
|
|
/*
|
|
|
|
* Copy and convert from the on-disk structure
|
|
|
|
* to the in-memory structure.
|
|
|
|
*/
|
|
|
|
xfs_bmdr_to_bmbt(ip, dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
|
|
|
|
ifp->if_broot, size);
|
|
|
|
|
2017-11-03 17:34:46 +00:00
|
|
|
ifp->if_bytes = 0;
|
|
|
|
ifp->if_u1.if_root = NULL;
|
|
|
|
ifp->if_height = 0;
|
2013-08-12 10:49:33 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-05-14 21:01:17 +00:00
|
|
|
int
|
|
|
|
xfs_iformat_data_fork(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
struct xfs_dinode *dip)
|
|
|
|
{
|
|
|
|
struct inode *inode = VFS_I(ip);
|
2020-05-14 21:01:31 +00:00
|
|
|
int error;
|
2020-05-14 21:01:17 +00:00
|
|
|
|
2020-05-18 17:27:22 +00:00
|
|
|
/*
|
|
|
|
* Initialize the extent count early, as the per-format routines may
|
|
|
|
* depend on it.
|
|
|
|
*/
|
2020-05-18 17:28:05 +00:00
|
|
|
ip->i_df.if_format = dip->di_format;
|
2020-05-18 17:27:22 +00:00
|
|
|
ip->i_df.if_nextents = be32_to_cpu(dip->di_nextents);
|
|
|
|
|
2020-05-14 21:01:17 +00:00
|
|
|
switch (inode->i_mode & S_IFMT) {
|
|
|
|
case S_IFIFO:
|
|
|
|
case S_IFCHR:
|
|
|
|
case S_IFBLK:
|
|
|
|
case S_IFSOCK:
|
2021-03-29 18:11:40 +00:00
|
|
|
ip->i_disk_size = 0;
|
2020-05-14 21:01:17 +00:00
|
|
|
inode->i_rdev = xfs_to_linux_dev_t(xfs_dinode_get_rdev(dip));
|
|
|
|
return 0;
|
|
|
|
case S_IFREG:
|
|
|
|
case S_IFLNK:
|
|
|
|
case S_IFDIR:
|
2020-05-18 17:28:05 +00:00
|
|
|
switch (ip->i_df.if_format) {
|
2020-05-14 21:01:17 +00:00
|
|
|
case XFS_DINODE_FMT_LOCAL:
|
2020-05-14 21:01:31 +00:00
|
|
|
error = xfs_iformat_local(ip, dip, XFS_DATA_FORK,
|
2020-05-14 21:01:17 +00:00
|
|
|
be64_to_cpu(dip->di_size));
|
2020-05-14 21:01:31 +00:00
|
|
|
if (!error)
|
|
|
|
error = xfs_ifork_verify_local_data(ip);
|
|
|
|
return error;
|
2020-05-14 21:01:17 +00:00
|
|
|
case XFS_DINODE_FMT_EXTENTS:
|
|
|
|
return xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
|
|
|
|
case XFS_DINODE_FMT_BTREE:
|
|
|
|
return xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
|
|
|
|
default:
|
|
|
|
xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
|
|
|
|
dip, sizeof(*dip), __this_address);
|
|
|
|
return -EFSCORRUPTED;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
|
|
|
|
sizeof(*dip), __this_address);
|
|
|
|
return -EFSCORRUPTED;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint16_t
|
|
|
|
xfs_dfork_attr_shortform_size(
|
|
|
|
struct xfs_dinode *dip)
|
|
|
|
{
|
|
|
|
struct xfs_attr_shortform *atp =
|
|
|
|
(struct xfs_attr_shortform *)XFS_DFORK_APTR(dip);
|
|
|
|
|
|
|
|
return be16_to_cpu(atp->hdr.totsize);
|
|
|
|
}
|
|
|
|
|
xfs: initialise attr fork on inode create
When we allocate a new inode, we often need to add an attribute to
the inode as part of the create. This can happen as a result of
needing to add default ACLs or security labels before the inode is
made visible to userspace.
This is highly inefficient right now. We do the create transaction
to allocate the inode, then we do an "add attr fork" transaction to
modify the just created empty inode to set the inode fork offset to
allow attributes to be stored, then we go and do the attribute
creation.
This means 3 transactions instead of 1 to allocate an inode, and
this greatly increases the load on the CIL commit code, resulting in
excessive contention on the CIL spin locks and performance
degradation:
18.99% [kernel] [k] __pv_queued_spin_lock_slowpath
3.57% [kernel] [k] do_raw_spin_lock
2.51% [kernel] [k] __raw_callee_save___pv_queued_spin_unlock
2.48% [kernel] [k] memcpy
2.34% [kernel] [k] xfs_log_commit_cil
The typical profile resulting from running fsmark on a selinux enabled
filesytem is adds this overhead to the create path:
- 15.30% xfs_init_security
- 15.23% security_inode_init_security
- 13.05% xfs_initxattrs
- 12.94% xfs_attr_set
- 6.75% xfs_bmap_add_attrfork
- 5.51% xfs_trans_commit
- 5.48% __xfs_trans_commit
- 5.35% xfs_log_commit_cil
- 3.86% _raw_spin_lock
- do_raw_spin_lock
__pv_queued_spin_lock_slowpath
- 0.70% xfs_trans_alloc
0.52% xfs_trans_reserve
- 5.41% xfs_attr_set_args
- 5.39% xfs_attr_set_shortform.constprop.0
- 4.46% xfs_trans_commit
- 4.46% __xfs_trans_commit
- 4.33% xfs_log_commit_cil
- 2.74% _raw_spin_lock
- do_raw_spin_lock
__pv_queued_spin_lock_slowpath
0.60% xfs_inode_item_format
0.90% xfs_attr_try_sf_addname
- 1.99% selinux_inode_init_security
- 1.02% security_sid_to_context_force
- 1.00% security_sid_to_context_core
- 0.92% sidtab_entry_to_string
- 0.90% sidtab_sid2str_get
0.59% sidtab_sid2str_put.part.0
- 0.82% selinux_determine_inode_label
- 0.77% security_transition_sid
0.70% security_compute_sid.part.0
And fsmark creation rate performance drops by ~25%. The key point to
note here is that half the additional overhead comes from adding the
attribute fork to the newly created inode. That's crazy, considering
we can do this same thing at inode create time with a couple of
lines of code and no extra overhead.
So, if we know we are going to add an attribute immediately after
creating the inode, let's just initialise the attribute fork inside
the create transaction and chop that whole chunk of code out of
the create fast path. This completely removes the performance
drop caused by enabling SELinux, and the profile looks like:
- 8.99% xfs_init_security
- 9.00% security_inode_init_security
- 6.43% xfs_initxattrs
- 6.37% xfs_attr_set
- 5.45% xfs_attr_set_args
- 5.42% xfs_attr_set_shortform.constprop.0
- 4.51% xfs_trans_commit
- 4.54% __xfs_trans_commit
- 4.59% xfs_log_commit_cil
- 2.67% _raw_spin_lock
- 3.28% do_raw_spin_lock
3.08% __pv_queued_spin_lock_slowpath
0.66% xfs_inode_item_format
- 0.90% xfs_attr_try_sf_addname
- 0.60% xfs_trans_alloc
- 2.35% selinux_inode_init_security
- 1.25% security_sid_to_context_force
- 1.21% security_sid_to_context_core
- 1.19% sidtab_entry_to_string
- 1.20% sidtab_sid2str_get
- 0.86% sidtab_sid2str_put.part.0
- 0.62% _raw_spin_lock_irqsave
- 0.77% do_raw_spin_lock
__pv_queued_spin_lock_slowpath
- 0.84% selinux_determine_inode_label
- 0.83% security_transition_sid
0.86% security_compute_sid.part.0
Which indicates the XFS overhead of creating the selinux xattr has
been halved. This doesn't fix the CIL lock contention problem, just
means it's not a limiting factor for this workload. Lock contention
in the security subsystems is going to be an issue soon, though...
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
[djwong: fix compilation error when CONFIG_SECURITY=n]
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Gao Xiang <hsiangkao@redhat.com>
2021-03-22 16:52:03 +00:00
|
|
|
struct xfs_ifork *
|
|
|
|
xfs_ifork_alloc(
|
|
|
|
enum xfs_dinode_fmt format,
|
|
|
|
xfs_extnum_t nextents)
|
|
|
|
{
|
|
|
|
struct xfs_ifork *ifp;
|
|
|
|
|
2021-10-12 18:09:23 +00:00
|
|
|
ifp = kmem_cache_zalloc(xfs_ifork_cache, GFP_NOFS | __GFP_NOFAIL);
|
xfs: initialise attr fork on inode create
When we allocate a new inode, we often need to add an attribute to
the inode as part of the create. This can happen as a result of
needing to add default ACLs or security labels before the inode is
made visible to userspace.
This is highly inefficient right now. We do the create transaction
to allocate the inode, then we do an "add attr fork" transaction to
modify the just created empty inode to set the inode fork offset to
allow attributes to be stored, then we go and do the attribute
creation.
This means 3 transactions instead of 1 to allocate an inode, and
this greatly increases the load on the CIL commit code, resulting in
excessive contention on the CIL spin locks and performance
degradation:
18.99% [kernel] [k] __pv_queued_spin_lock_slowpath
3.57% [kernel] [k] do_raw_spin_lock
2.51% [kernel] [k] __raw_callee_save___pv_queued_spin_unlock
2.48% [kernel] [k] memcpy
2.34% [kernel] [k] xfs_log_commit_cil
The typical profile resulting from running fsmark on a selinux enabled
filesytem is adds this overhead to the create path:
- 15.30% xfs_init_security
- 15.23% security_inode_init_security
- 13.05% xfs_initxattrs
- 12.94% xfs_attr_set
- 6.75% xfs_bmap_add_attrfork
- 5.51% xfs_trans_commit
- 5.48% __xfs_trans_commit
- 5.35% xfs_log_commit_cil
- 3.86% _raw_spin_lock
- do_raw_spin_lock
__pv_queued_spin_lock_slowpath
- 0.70% xfs_trans_alloc
0.52% xfs_trans_reserve
- 5.41% xfs_attr_set_args
- 5.39% xfs_attr_set_shortform.constprop.0
- 4.46% xfs_trans_commit
- 4.46% __xfs_trans_commit
- 4.33% xfs_log_commit_cil
- 2.74% _raw_spin_lock
- do_raw_spin_lock
__pv_queued_spin_lock_slowpath
0.60% xfs_inode_item_format
0.90% xfs_attr_try_sf_addname
- 1.99% selinux_inode_init_security
- 1.02% security_sid_to_context_force
- 1.00% security_sid_to_context_core
- 0.92% sidtab_entry_to_string
- 0.90% sidtab_sid2str_get
0.59% sidtab_sid2str_put.part.0
- 0.82% selinux_determine_inode_label
- 0.77% security_transition_sid
0.70% security_compute_sid.part.0
And fsmark creation rate performance drops by ~25%. The key point to
note here is that half the additional overhead comes from adding the
attribute fork to the newly created inode. That's crazy, considering
we can do this same thing at inode create time with a couple of
lines of code and no extra overhead.
So, if we know we are going to add an attribute immediately after
creating the inode, let's just initialise the attribute fork inside
the create transaction and chop that whole chunk of code out of
the create fast path. This completely removes the performance
drop caused by enabling SELinux, and the profile looks like:
- 8.99% xfs_init_security
- 9.00% security_inode_init_security
- 6.43% xfs_initxattrs
- 6.37% xfs_attr_set
- 5.45% xfs_attr_set_args
- 5.42% xfs_attr_set_shortform.constprop.0
- 4.51% xfs_trans_commit
- 4.54% __xfs_trans_commit
- 4.59% xfs_log_commit_cil
- 2.67% _raw_spin_lock
- 3.28% do_raw_spin_lock
3.08% __pv_queued_spin_lock_slowpath
0.66% xfs_inode_item_format
- 0.90% xfs_attr_try_sf_addname
- 0.60% xfs_trans_alloc
- 2.35% selinux_inode_init_security
- 1.25% security_sid_to_context_force
- 1.21% security_sid_to_context_core
- 1.19% sidtab_entry_to_string
- 1.20% sidtab_sid2str_get
- 0.86% sidtab_sid2str_put.part.0
- 0.62% _raw_spin_lock_irqsave
- 0.77% do_raw_spin_lock
__pv_queued_spin_lock_slowpath
- 0.84% selinux_determine_inode_label
- 0.83% security_transition_sid
0.86% security_compute_sid.part.0
Which indicates the XFS overhead of creating the selinux xattr has
been halved. This doesn't fix the CIL lock contention problem, just
means it's not a limiting factor for this workload. Lock contention
in the security subsystems is going to be an issue soon, though...
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
[djwong: fix compilation error when CONFIG_SECURITY=n]
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Gao Xiang <hsiangkao@redhat.com>
2021-03-22 16:52:03 +00:00
|
|
|
ifp->if_format = format;
|
|
|
|
ifp->if_nextents = nextents;
|
|
|
|
return ifp;
|
|
|
|
}
|
|
|
|
|
2020-05-14 21:01:17 +00:00
|
|
|
int
|
|
|
|
xfs_iformat_attr_fork(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
struct xfs_dinode *dip)
|
|
|
|
{
|
|
|
|
int error = 0;
|
|
|
|
|
2020-05-18 17:27:22 +00:00
|
|
|
/*
|
|
|
|
* Initialize the extent count early, as the per-format routines may
|
|
|
|
* depend on it.
|
|
|
|
*/
|
xfs: initialise attr fork on inode create
When we allocate a new inode, we often need to add an attribute to
the inode as part of the create. This can happen as a result of
needing to add default ACLs or security labels before the inode is
made visible to userspace.
This is highly inefficient right now. We do the create transaction
to allocate the inode, then we do an "add attr fork" transaction to
modify the just created empty inode to set the inode fork offset to
allow attributes to be stored, then we go and do the attribute
creation.
This means 3 transactions instead of 1 to allocate an inode, and
this greatly increases the load on the CIL commit code, resulting in
excessive contention on the CIL spin locks and performance
degradation:
18.99% [kernel] [k] __pv_queued_spin_lock_slowpath
3.57% [kernel] [k] do_raw_spin_lock
2.51% [kernel] [k] __raw_callee_save___pv_queued_spin_unlock
2.48% [kernel] [k] memcpy
2.34% [kernel] [k] xfs_log_commit_cil
The typical profile resulting from running fsmark on a selinux enabled
filesytem is adds this overhead to the create path:
- 15.30% xfs_init_security
- 15.23% security_inode_init_security
- 13.05% xfs_initxattrs
- 12.94% xfs_attr_set
- 6.75% xfs_bmap_add_attrfork
- 5.51% xfs_trans_commit
- 5.48% __xfs_trans_commit
- 5.35% xfs_log_commit_cil
- 3.86% _raw_spin_lock
- do_raw_spin_lock
__pv_queued_spin_lock_slowpath
- 0.70% xfs_trans_alloc
0.52% xfs_trans_reserve
- 5.41% xfs_attr_set_args
- 5.39% xfs_attr_set_shortform.constprop.0
- 4.46% xfs_trans_commit
- 4.46% __xfs_trans_commit
- 4.33% xfs_log_commit_cil
- 2.74% _raw_spin_lock
- do_raw_spin_lock
__pv_queued_spin_lock_slowpath
0.60% xfs_inode_item_format
0.90% xfs_attr_try_sf_addname
- 1.99% selinux_inode_init_security
- 1.02% security_sid_to_context_force
- 1.00% security_sid_to_context_core
- 0.92% sidtab_entry_to_string
- 0.90% sidtab_sid2str_get
0.59% sidtab_sid2str_put.part.0
- 0.82% selinux_determine_inode_label
- 0.77% security_transition_sid
0.70% security_compute_sid.part.0
And fsmark creation rate performance drops by ~25%. The key point to
note here is that half the additional overhead comes from adding the
attribute fork to the newly created inode. That's crazy, considering
we can do this same thing at inode create time with a couple of
lines of code and no extra overhead.
So, if we know we are going to add an attribute immediately after
creating the inode, let's just initialise the attribute fork inside
the create transaction and chop that whole chunk of code out of
the create fast path. This completely removes the performance
drop caused by enabling SELinux, and the profile looks like:
- 8.99% xfs_init_security
- 9.00% security_inode_init_security
- 6.43% xfs_initxattrs
- 6.37% xfs_attr_set
- 5.45% xfs_attr_set_args
- 5.42% xfs_attr_set_shortform.constprop.0
- 4.51% xfs_trans_commit
- 4.54% __xfs_trans_commit
- 4.59% xfs_log_commit_cil
- 2.67% _raw_spin_lock
- 3.28% do_raw_spin_lock
3.08% __pv_queued_spin_lock_slowpath
0.66% xfs_inode_item_format
- 0.90% xfs_attr_try_sf_addname
- 0.60% xfs_trans_alloc
- 2.35% selinux_inode_init_security
- 1.25% security_sid_to_context_force
- 1.21% security_sid_to_context_core
- 1.19% sidtab_entry_to_string
- 1.20% sidtab_sid2str_get
- 0.86% sidtab_sid2str_put.part.0
- 0.62% _raw_spin_lock_irqsave
- 0.77% do_raw_spin_lock
__pv_queued_spin_lock_slowpath
- 0.84% selinux_determine_inode_label
- 0.83% security_transition_sid
0.86% security_compute_sid.part.0
Which indicates the XFS overhead of creating the selinux xattr has
been halved. This doesn't fix the CIL lock contention problem, just
means it's not a limiting factor for this workload. Lock contention
in the security subsystems is going to be an issue soon, though...
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
[djwong: fix compilation error when CONFIG_SECURITY=n]
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Gao Xiang <hsiangkao@redhat.com>
2021-03-22 16:52:03 +00:00
|
|
|
ip->i_afp = xfs_ifork_alloc(dip->di_aformat,
|
|
|
|
be16_to_cpu(dip->di_anextents));
|
2020-05-18 17:27:22 +00:00
|
|
|
|
2020-05-18 17:28:05 +00:00
|
|
|
switch (ip->i_afp->if_format) {
|
2020-05-14 21:01:17 +00:00
|
|
|
case XFS_DINODE_FMT_LOCAL:
|
|
|
|
error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK,
|
|
|
|
xfs_dfork_attr_shortform_size(dip));
|
2020-05-14 21:01:31 +00:00
|
|
|
if (!error)
|
|
|
|
error = xfs_ifork_verify_local_attr(ip);
|
2020-05-14 21:01:17 +00:00
|
|
|
break;
|
|
|
|
case XFS_DINODE_FMT_EXTENTS:
|
|
|
|
error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
|
|
|
|
break;
|
|
|
|
case XFS_DINODE_FMT_BTREE:
|
|
|
|
error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
xfs_inode_verifier_error(ip, error, __func__, dip,
|
|
|
|
sizeof(*dip), __this_address);
|
|
|
|
error = -EFSCORRUPTED;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (error) {
|
2021-10-12 18:09:23 +00:00
|
|
|
kmem_cache_free(xfs_ifork_cache, ip->i_afp);
|
2020-05-14 21:01:17 +00:00
|
|
|
ip->i_afp = NULL;
|
|
|
|
}
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2013-08-12 10:49:33 +00:00
|
|
|
/*
 * Reallocate the space for if_broot based on the number of records
 * being added or deleted as indicated in rec_diff.  Move the records
 * and pointers in if_broot to fit the new size.  When shrinking this
 * will eliminate holes between the records and pointers created by
 * the caller.  When growing this will create holes to be filled in
 * by the caller.
 *
 * The caller must not request to add more records than would fit in
 * the on-disk inode root.  If the if_broot is currently NULL, then
 * if we are adding records, one will be allocated.  The caller must also
 * not request that the number of records go below zero, although
 * it can go to zero.
 *
 * ip -- the inode whose if_broot area is changing
 * rec_diff -- the change in the number of records, positive or negative,
 *	 requested for the if_broot array.
 * whichfork -- data or attr fork of the inode.
 */
void
xfs_iroot_realloc(
	xfs_inode_t		*ip,
	int			rec_diff,
	int			whichfork)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			cur_max;	/* records that fit today */
	struct xfs_ifork	*ifp;
	struct xfs_btree_block	*new_broot;
	int			new_max;	/* records after the change */
	size_t			new_size;	/* bytes after the change */
	char			*np;		/* new block's pointer area */
	char			*op;		/* old block's pointer area */

	/*
	 * Handle the degenerate case quietly.
	 */
	if (rec_diff == 0) {
		return;
	}

	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (rec_diff > 0) {
		/*
		 * If there wasn't any memory allocated before, just
		 * allocate it now and get out.
		 */
		if (ifp->if_broot_bytes == 0) {
			new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, rec_diff);
			ifp->if_broot = kmem_alloc(new_size, KM_NOFS);
			ifp->if_broot_bytes = (int)new_size;
			return;
		}

		/*
		 * If there is already an existing if_broot, then we need
		 * to realloc() it and shift the pointers to their new
		 * location.  The records don't change location because
		 * they are kept butted up against the btree block header.
		 */
		cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
		new_max = cur_max + rec_diff;
		new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
		ifp->if_broot = krealloc(ifp->if_broot, new_size,
					 GFP_NOFS | __GFP_NOFAIL);
		/*
		 * Compute the old pointer area from the pre-realloc size and
		 * the new one from new_size before updating if_broot_bytes,
		 * then slide the pointers up with memmove (the two pointer
		 * areas overlap within the same buffer).
		 */
		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
						     ifp->if_broot_bytes);
		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
						     (int)new_size);
		ifp->if_broot_bytes = (int)new_size;
		ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
			XFS_IFORK_SIZE(ip, whichfork));
		memmove(np, op, cur_max * (uint)sizeof(xfs_fsblock_t));
		return;
	}

	/*
	 * rec_diff is less than 0.  In this case, we are shrinking the
	 * if_broot buffer.  It must already exist.  If we go to zero
	 * records, just get rid of the root and clear the status bit.
	 */
	ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
	cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
	new_max = cur_max + rec_diff;
	ASSERT(new_max >= 0);
	if (new_max > 0)
		new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
	else
		new_size = 0;
	if (new_size > 0) {
		new_broot = kmem_alloc(new_size, KM_NOFS);
		/*
		 * First copy over the btree block header.
		 */
		memcpy(new_broot, ifp->if_broot,
			XFS_BMBT_BLOCK_LEN(ip->i_mount));
	} else {
		new_broot = NULL;
	}

	/*
	 * Only copy the records and pointers if there are any.
	 */
	if (new_max > 0) {
		/*
		 * First copy the records.
		 */
		op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1);
		np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1);
		memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));

		/*
		 * Then copy the pointers.
		 */
		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
						     ifp->if_broot_bytes);
		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1,
						     (int)new_size);
		memcpy(np, op, new_max * (uint)sizeof(xfs_fsblock_t));
	}
	kmem_free(ifp->if_broot);
	ifp->if_broot = new_broot;
	ifp->if_broot_bytes = (int)new_size;
	if (ifp->if_broot)
		ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
			XFS_IFORK_SIZE(ip, whichfork));
	return;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is called when the amount of space needed for if_data
|
|
|
|
* is increased or decreased. The change in size is indicated by
|
|
|
|
* the number of bytes that need to be added or deleted in the
|
|
|
|
* byte_diff parameter.
|
|
|
|
*
|
|
|
|
* If the amount of space needed has decreased below the size of the
|
|
|
|
* inline buffer, then switch to using the inline buffer. Otherwise,
|
|
|
|
* use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
|
|
|
|
* to what is needed.
|
|
|
|
*
|
|
|
|
* ip -- the inode whose if_data area is changing
|
|
|
|
* byte_diff -- the change in the number of bytes, positive or negative,
|
|
|
|
* requested for the if_data array.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
xfs_idata_realloc(
|
2018-07-17 23:51:50 +00:00
|
|
|
struct xfs_inode *ip,
|
xfs: fix inode fork extent count overflow
[commit message is verbose for discussion purposes - will trim it
down later. Some questions about implementation details at the end.]
Zorro Lang recently ran a new test to stress single inode extent
counts now that they are no longer limited by memory allocation.
The test was simply:
# xfs_io -f -c "falloc 0 40t" /mnt/scratch/big-file
# ~/src/xfstests-dev/punch-alternating /mnt/scratch/big-file
This test uncovered a problem where the hole punching operation
appeared to finish with no error, but apparently only created 268M
extents instead of the 10 billion it was supposed to.
Further, trying to punch out extents that should have been present
resulted in success, but no change in the extent count. It looked
like a silent failure.
While running the test and observing the behaviour in real time,
I observed the extent coutn growing at ~2M extents/minute, and saw
this after about an hour:
# xfs_io -f -c "stat" /mnt/scratch/big-file |grep next ; \
> sleep 60 ; \
> xfs_io -f -c "stat" /mnt/scratch/big-file |grep next
fsxattr.nextents = 127657993
fsxattr.nextents = 129683339
#
And a few minutes later this:
# xfs_io -f -c "stat" /mnt/scratch/big-file |grep next
fsxattr.nextents = 4177861124
#
Ah, what? Where did that 4 billion extra extents suddenly come from?
Stop the workload, unmount, mount:
# xfs_io -f -c "stat" /mnt/scratch/big-file |grep next
fsxattr.nextents = 166044375
#
And it's back at the expected number. i.e. the extent count is
correct on disk, but it's screwed up in memory. I loaded up the
extent list, and immediately:
# xfs_io -f -c "stat" /mnt/scratch/big-file |grep next
fsxattr.nextents = 4192576215
#
It's bad again. So, where does that number come from?
xfs_fill_fsxattr():
if (ip->i_df.if_flags & XFS_IFEXTENTS)
fa->fsx_nextents = xfs_iext_count(&ip->i_df);
else
fa->fsx_nextents = ip->i_d.di_nextents;
And that's the behaviour I just saw in a nutshell. The on disk count
is correct, but once the tree is loaded into memory, it goes whacky.
Clearly there's something wrong with xfs_iext_count():
inline xfs_extnum_t xfs_iext_count(struct xfs_ifork *ifp)
{
return ifp->if_bytes / sizeof(struct xfs_iext_rec);
}
Simple enough, but 134M extents is 2**27, and that's right about
where things went wrong. A struct xfs_iext_rec is 16 bytes in size,
which means 2**27 * 2**4 = 2**31 and we're right on target for an
integer overflow. And, sure enough:
struct xfs_ifork {
int if_bytes; /* bytes in if_u1 */
....
Once we get 2**27 extents in a file, we overflow if_bytes and the
in-core extent count goes wrong. And when we reach 2**28 extents,
if_bytes wraps back to zero and things really start to go wrong
there. This is where the silent failure comes from - only the first
2**28 extents can be looked up directly due to the overflow, all the
extents above this index wrap back to somewhere in the first 2**28
extents. Hence with a regular pattern, trying to punch a hole in the
range that didn't have holes mapped to a hole in the first 2**28
extents and so "succeeded" without changing anything. Hence "silent
failure"...
Fix this by converting if_bytes to a int64_t and converting all the
index variables and size calculations to use int64_t types to avoid
overflows in future. Signed integers are still used to enable easy
detection of extent count underflows. This enables scalability of
extent counts to the limits of the on-disk format - MAXEXTNUM
(2**31) extents.
Current testing is at over 500M extents and still going:
fsxattr.nextents = 517310478
Reported-by: Zorro Lang <zlang@redhat.com>
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
2019-10-17 20:40:33 +00:00
|
|
|
int64_t byte_diff,
|
2018-07-17 23:51:50 +00:00
|
|
|
int whichfork)
|
2013-08-12 10:49:33 +00:00
|
|
|
{
|
2018-07-17 23:51:50 +00:00
|
|
|
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
|
xfs: fix inode fork extent count overflow
[commit message is verbose for discussion purposes - will trim it
down later. Some questions about implementation details at the end.]
Zorro Lang recently ran a new test to stress single inode extent
counts now that they are no longer limited by memory allocation.
The test was simply:
# xfs_io -f -c "falloc 0 40t" /mnt/scratch/big-file
# ~/src/xfstests-dev/punch-alternating /mnt/scratch/big-file
This test uncovered a problem where the hole punching operation
appeared to finish with no error, but apparently only created 268M
extents instead of the 10 billion it was supposed to.
Further, trying to punch out extents that should have been present
resulted in success, but no change in the extent count. It looked
like a silent failure.
While running the test and observing the behaviour in real time,
I observed the extent coutn growing at ~2M extents/minute, and saw
this after about an hour:
# xfs_io -f -c "stat" /mnt/scratch/big-file |grep next ; \
> sleep 60 ; \
> xfs_io -f -c "stat" /mnt/scratch/big-file |grep next
fsxattr.nextents = 127657993
fsxattr.nextents = 129683339
#
And a few minutes later this:
# xfs_io -f -c "stat" /mnt/scratch/big-file |grep next
fsxattr.nextents = 4177861124
#
Ah, what? Where did that 4 billion extra extents suddenly come from?
Stop the workload, unmount, mount:
# xfs_io -f -c "stat" /mnt/scratch/big-file |grep next
fsxattr.nextents = 166044375
#
And it's back at the expected number. i.e. the extent count is
correct on disk, but it's screwed up in memory. I loaded up the
extent list, and immediately:
# xfs_io -f -c "stat" /mnt/scratch/big-file |grep next
fsxattr.nextents = 4192576215
#
It's bad again. So, where does that number come from?
xfs_fill_fsxattr():
if (ip->i_df.if_flags & XFS_IFEXTENTS)
fa->fsx_nextents = xfs_iext_count(&ip->i_df);
else
fa->fsx_nextents = ip->i_d.di_nextents;
And that's the behaviour I just saw in a nutshell. The on disk count
is correct, but once the tree is loaded into memory, it goes whacky.
Clearly there's something wrong with xfs_iext_count():
inline xfs_extnum_t xfs_iext_count(struct xfs_ifork *ifp)
{
return ifp->if_bytes / sizeof(struct xfs_iext_rec);
}
Simple enough, but 134M extents is 2**27, and that's right about
where things went wrong. A struct xfs_iext_rec is 16 bytes in size,
which means 2**27 * 2**4 = 2**31 and we're right on target for an
integer overflow. And, sure enough:
struct xfs_ifork {
int if_bytes; /* bytes in if_u1 */
....
Once we get 2**27 extents in a file, we overflow if_bytes and the
in-core extent count goes wrong. And when we reach 2**28 extents,
if_bytes wraps back to zero and things really start to go wrong
there. This is where the silent failure comes from - only the first
2**28 extents can be looked up directly due to the overflow, all the
extents above this index wrap back to somewhere in the first 2**28
extents. Hence with a regular pattern, trying to punch a hole in the
range that didn't have holes mapped to a hole in the first 2**28
extents and so "succeeded" without changing anything. Hence "silent
failure"...
Fix this by converting if_bytes to a int64_t and converting all the
index variables and size calculations to use int64_t types to avoid
overflows in future. Signed integers are still used to enable easy
detection of extent count underflows. This enables scalability of
extent counts to the limits of the on-disk format - MAXEXTNUM
(2**31) extents.
Current testing is at over 500M extents and still going:
fsxattr.nextents = 517310478
Reported-by: Zorro Lang <zlang@redhat.com>
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
2019-10-17 20:40:33 +00:00
|
|
|
int64_t new_size = ifp->if_bytes + byte_diff;
|
2013-08-12 10:49:33 +00:00
|
|
|
|
|
|
|
ASSERT(new_size >= 0);
|
2018-07-17 23:51:50 +00:00
|
|
|
ASSERT(new_size <= XFS_IFORK_SIZE(ip, whichfork));
|
|
|
|
|
|
|
|
if (byte_diff == 0)
|
|
|
|
return;
|
2013-08-12 10:49:33 +00:00
|
|
|
|
|
|
|
if (new_size == 0) {
|
2017-11-03 17:34:45 +00:00
|
|
|
kmem_free(ifp->if_u1.if_data);
|
2013-08-12 10:49:33 +00:00
|
|
|
ifp->if_u1.if_data = NULL;
|
2018-07-17 23:51:50 +00:00
|
|
|
ifp->if_bytes = 0;
|
|
|
|
return;
|
2013-08-12 10:49:33 +00:00
|
|
|
}
|
2018-07-17 23:51:50 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* For inline data, the underlying buffer must be a multiple of 4 bytes
|
|
|
|
* in size so that it can be logged and stay on word boundaries.
|
|
|
|
* We enforce that here.
|
|
|
|
*/
|
2020-08-26 21:05:56 +00:00
|
|
|
ifp->if_u1.if_data = krealloc(ifp->if_u1.if_data, roundup(new_size, 4),
|
|
|
|
GFP_NOFS | __GFP_NOFAIL);
|
2013-08-12 10:49:33 +00:00
|
|
|
ifp->if_bytes = new_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
xfs_idestroy_fork(
|
2020-05-18 17:29:27 +00:00
|
|
|
struct xfs_ifork *ifp)
|
2013-08-12 10:49:33 +00:00
|
|
|
{
|
|
|
|
if (ifp->if_broot != NULL) {
|
|
|
|
kmem_free(ifp->if_broot);
|
|
|
|
ifp->if_broot = NULL;
|
|
|
|
}
|
|
|
|
|
2021-04-13 18:15:10 +00:00
|
|
|
switch (ifp->if_format) {
|
|
|
|
case XFS_DINODE_FMT_LOCAL:
|
2020-05-18 17:29:27 +00:00
|
|
|
kmem_free(ifp->if_u1.if_data);
|
|
|
|
ifp->if_u1.if_data = NULL;
|
2021-04-13 18:15:10 +00:00
|
|
|
break;
|
|
|
|
case XFS_DINODE_FMT_EXTENTS:
|
|
|
|
case XFS_DINODE_FMT_BTREE:
|
2020-05-18 17:29:27 +00:00
|
|
|
if (ifp->if_height)
|
|
|
|
xfs_iext_destroy(ifp);
|
2021-04-13 18:15:10 +00:00
|
|
|
break;
|
2013-08-12 10:49:33 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Convert in-core extents to on-disk form
 *
 * In the case of the data fork, the in-core and on-disk fork sizes can be
 * different due to delayed allocation extents. We only copy on-disk extents
 * here, so callers must always use the physical fork size to determine the
 * size of the buffer passed to this routine.  We will return the size
 * actually used, in bytes.
 */
int
xfs_iextents_copy(
	struct xfs_inode	*ip,
	struct xfs_bmbt_rec	*dp,
	int			whichfork)
{
	int			state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	int64_t			copied = 0;	/* bytes written to *dp */

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));
	ASSERT(ifp->if_bytes > 0);

	for_each_xfs_iext(ifp, &icur, &rec) {
		/* Delayed-allocation extents never go to disk; skip them. */
		if (isnullstartblock(rec.br_startblock))
			continue;
		ASSERT(xfs_bmap_validate_extent(ip, whichfork, &rec) == NULL);
		xfs_bmbt_disk_set_all(dp, &rec);
		trace_xfs_write_extent(ip, &icur, state, _RET_IP_);
		copied += sizeof(struct xfs_bmbt_rec);
		dp++;
	}

	ASSERT(copied > 0);
	ASSERT(copied <= ifp->if_bytes);
	return copied;
}
|
|
|
|
|
|
|
|
/*
 * Each of the following cases stores data into the same region
 * of the on-disk inode, so only one of them can be valid at
 * any given time. While it is possible to have conflicting formats
 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
 * in EXTENTS format, this can only happen when the fork has
 * changed formats after being modified but before being flushed.
 * In these cases, the format always takes precedence, because the
 * format indicates the current state of the fork.
 */
void
xfs_iflush_fork(
	struct xfs_inode	*ip,
	struct xfs_dinode	*dip,
	struct xfs_inode_log_item *iip,
	int			whichfork)
{
	char			*cp;	/* fork area inside the dinode */
	struct xfs_ifork	*ifp;
	xfs_mount_t		*mp;
	/* Per-fork log flags, indexed by whichfork (0 = data, 1 = attr). */
	static const short	brootflag[2] =
		{ XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
	static const short	dataflag[2] =
		{ XFS_ILOG_DDATA, XFS_ILOG_ADATA };
	static const short	extflag[2] =
		{ XFS_ILOG_DEXT, XFS_ILOG_AEXT };

	/* Nothing logged against this inode -> nothing to flush. */
	if (!iip)
		return;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	/*
	 * This can happen if we gave up in iformat in an error path,
	 * for the attribute fork.
	 */
	if (!ifp) {
		ASSERT(whichfork == XFS_ATTR_FORK);
		return;
	}
	cp = XFS_DFORK_PTR(dip, whichfork);
	mp = ip->i_mount;
	switch (ifp->if_format) {
	case XFS_DINODE_FMT_LOCAL:
		/* Inline data: straight byte copy into the dinode. */
		if ((iip->ili_fields & dataflag[whichfork]) &&
		    (ifp->if_bytes > 0)) {
			ASSERT(ifp->if_u1.if_data != NULL);
			ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
			memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
		}
		break;

	case XFS_DINODE_FMT_EXTENTS:
		/* Write the real (non-delalloc) extents to the dinode. */
		if ((iip->ili_fields & extflag[whichfork]) &&
		    (ifp->if_bytes > 0)) {
			ASSERT(ifp->if_nextents > 0);
			(void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
				whichfork);
		}
		break;

	case XFS_DINODE_FMT_BTREE:
		/* Convert the incore broot to on-disk bmdr format. */
		if ((iip->ili_fields & brootflag[whichfork]) &&
		    (ifp->if_broot_bytes > 0)) {
			ASSERT(ifp->if_broot != NULL);
			ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
			        XFS_IFORK_SIZE(ip, whichfork));
			xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes,
				(xfs_bmdr_block_t *)cp,
				XFS_DFORK_SIZE(dip, mp, whichfork));
		}
		break;

	case XFS_DINODE_FMT_DEV:
		/* Special files keep the device number in the data fork. */
		if (iip->ili_fields & XFS_ILOG_DEV) {
			ASSERT(whichfork == XFS_DATA_FORK);
			xfs_dinode_put_rdev(dip,
					linux_to_xfs_dev_t(VFS_I(ip)->i_rdev));
		}
		break;

	default:
		ASSERT(0);
		break;
	}
}
|
|
|
|
|
2016-10-03 16:11:32 +00:00
|
|
|
/* Convert bmap state flags to an inode fork. */
|
|
|
|
struct xfs_ifork *
|
|
|
|
xfs_iext_state_to_fork(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
int state)
|
|
|
|
{
|
|
|
|
if (state & BMAP_COWFORK)
|
|
|
|
return ip->i_cowfp;
|
|
|
|
else if (state & BMAP_ATTRFORK)
|
|
|
|
return ip->i_afp;
|
|
|
|
return &ip->i_df;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize an inode's copy-on-write fork.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
xfs_ifork_init_cow(
|
|
|
|
struct xfs_inode *ip)
|
|
|
|
{
|
|
|
|
if (ip->i_cowfp)
|
|
|
|
return;
|
|
|
|
|
2021-10-12 18:09:23 +00:00
|
|
|
ip->i_cowfp = kmem_cache_zalloc(xfs_ifork_cache,
|
2020-07-22 16:23:10 +00:00
|
|
|
GFP_NOFS | __GFP_NOFAIL);
|
2020-05-18 17:28:05 +00:00
|
|
|
ip->i_cowfp->if_format = XFS_DINODE_FMT_EXTENTS;
|
2016-10-03 16:11:32 +00:00
|
|
|
}
|
2018-01-08 18:51:06 +00:00
|
|
|
|
|
|
|
/* Verify the inline contents of the data fork of an inode. */
|
2020-05-14 21:01:19 +00:00
|
|
|
int
|
|
|
|
xfs_ifork_verify_local_data(
|
2020-05-14 21:01:19 +00:00
|
|
|
struct xfs_inode *ip)
|
2018-01-08 18:51:06 +00:00
|
|
|
{
|
2020-05-14 21:01:19 +00:00
|
|
|
xfs_failaddr_t fa = NULL;
|
2018-01-08 18:51:06 +00:00
|
|
|
|
|
|
|
switch (VFS_I(ip)->i_mode & S_IFMT) {
|
|
|
|
case S_IFDIR:
|
2020-05-14 21:01:19 +00:00
|
|
|
fa = xfs_dir2_sf_verify(ip);
|
|
|
|
break;
|
2018-01-08 18:51:06 +00:00
|
|
|
case S_IFLNK:
|
2020-05-14 21:01:19 +00:00
|
|
|
fa = xfs_symlink_shortform_verify(ip);
|
|
|
|
break;
|
2018-01-08 18:51:06 +00:00
|
|
|
default:
|
2020-05-14 21:01:19 +00:00
|
|
|
break;
|
2018-01-08 18:51:06 +00:00
|
|
|
}
|
2020-05-14 21:01:19 +00:00
|
|
|
|
|
|
|
if (fa) {
|
|
|
|
xfs_inode_verifier_error(ip, -EFSCORRUPTED, "data fork",
|
|
|
|
ip->i_df.if_u1.if_data, ip->i_df.if_bytes, fa);
|
|
|
|
return -EFSCORRUPTED;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2018-01-08 18:51:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Verify the inline contents of the attr fork of an inode. */
|
2020-05-14 21:01:19 +00:00
|
|
|
int
|
|
|
|
xfs_ifork_verify_local_attr(
|
2020-05-14 21:01:19 +00:00
|
|
|
struct xfs_inode *ip)
|
2018-01-08 18:51:06 +00:00
|
|
|
{
|
2020-05-14 21:01:19 +00:00
|
|
|
struct xfs_ifork *ifp = ip->i_afp;
|
|
|
|
xfs_failaddr_t fa;
|
|
|
|
|
|
|
|
if (!ifp)
|
|
|
|
fa = __this_address;
|
|
|
|
else
|
|
|
|
fa = xfs_attr_shortform_verify(ip);
|
|
|
|
|
|
|
|
if (fa) {
|
|
|
|
xfs_inode_verifier_error(ip, -EFSCORRUPTED, "attr fork",
|
|
|
|
ifp ? ifp->if_u1.if_data : NULL,
|
|
|
|
ifp ? ifp->if_bytes : 0, fa);
|
|
|
|
return -EFSCORRUPTED;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2018-01-08 18:51:06 +00:00
|
|
|
}
|
2021-01-23 00:48:10 +00:00
|
|
|
|
|
|
|
int
|
|
|
|
xfs_iext_count_may_overflow(
|
|
|
|
struct xfs_inode *ip,
|
|
|
|
int whichfork,
|
|
|
|
int nr_to_add)
|
|
|
|
{
|
|
|
|
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
|
|
|
|
uint64_t max_exts;
|
|
|
|
uint64_t nr_exts;
|
|
|
|
|
|
|
|
if (whichfork == XFS_COW_FORK)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
max_exts = (whichfork == XFS_ATTR_FORK) ? MAXAEXTNUM : MAXEXTNUM;
|
|
|
|
|
2021-01-23 00:48:15 +00:00
|
|
|
if (XFS_TEST_ERROR(false, ip->i_mount, XFS_ERRTAG_REDUCE_MAX_IEXTENTS))
|
|
|
|
max_exts = 10;
|
|
|
|
|
2021-01-23 00:48:10 +00:00
|
|
|
nr_exts = ifp->if_nextents + nr_to_add;
|
|
|
|
if (nr_exts < ifp->if_nextents || nr_exts > max_exts)
|
|
|
|
return -EFBIG;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|