/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_ALLOC_H__
#define __XFS_ALLOC_H__

struct xfs_buf;
struct xfs_btree_cur;
struct xfs_mount;
struct xfs_perag;
struct xfs_trans;

extern struct workqueue_struct *xfs_alloc_wq;

unsigned int xfs_agfl_size(struct xfs_mount *mp);

/*
 * Flags for xfs_alloc_fix_freelist.
 */
#define XFS_ALLOC_FLAG_TRYLOCK	(1U << 0) /* use trylock for buffer locking */
#define XFS_ALLOC_FLAG_FREEING	(1U << 1) /* indicate caller is freeing extents */
#define XFS_ALLOC_FLAG_NORMAP	(1U << 2) /* don't modify the rmapbt */
#define XFS_ALLOC_FLAG_NOSHRINK	(1U << 3) /* don't shrink the freelist */
#define XFS_ALLOC_FLAG_CHECK	(1U << 4) /* test only, don't modify args */
/*
 * Don't sleep in xfs_extent_busy_flush(); return -EAGAIN instead, so that a
 * caller whose own transaction holds the busy extents can roll the
 * transaction and retry rather than deadlock.
 */
#define XFS_ALLOC_FLAG_TRYFLUSH	(1U << 5) /* don't wait in busy extent flush */

/*
 * Argument structure for xfs_alloc routines.
 * This is turned into a structure to avoid having 20 arguments passed
 * down several levels of the stack.
 */
typedef struct xfs_alloc_arg {
	struct xfs_trans	*tp;		/* transaction pointer */
	struct xfs_mount	*mp;		/* file system mount point */
	struct xfs_buf		*agbp;		/* buffer for a.g. freelist header */
	struct xfs_perag	*pag;		/* per-ag struct for this agno */
	xfs_fsblock_t		fsbno;		/* file system block number */
	xfs_agnumber_t		agno;		/* allocation group number */
	xfs_agblock_t		agbno;		/* allocation group-relative block # */
	xfs_extlen_t		minlen;		/* minimum size of extent */
	xfs_extlen_t		maxlen;		/* maximum size of extent */
	xfs_extlen_t		mod;		/* mod value for extent size */
	xfs_extlen_t		prod;		/* prod value for extent size */
	xfs_extlen_t		minleft;	/* min blocks must be left after us */
	xfs_extlen_t		total;		/* total blocks needed in xaction */
	xfs_extlen_t		alignment;	/* align answer to multiple of this */
	xfs_extlen_t		minalignslop;	/* slop for minlen+alignment calcs */
	xfs_agblock_t		min_agbno;	/* set an agbno range for NEAR allocs */
	xfs_agblock_t		max_agbno;	/* maximum agbno for NEAR allocs */
	xfs_extlen_t		len;		/* output: actual size of extent */
	int			datatype;	/* mask defining data type treatment */
	char			wasdel;		/* set if allocation was prev delayed */
	char			wasfromfl;	/* set if allocation is from freelist */
	struct xfs_owner_info	oinfo;		/* owner of blocks being allocated */
	enum xfs_ag_resv_type	resv;		/* block reservation to use */
#ifdef DEBUG
	bool			alloc_minlen_only; /* allocate exact minlen extent */
#endif
} xfs_alloc_arg_t;
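/*
 * Example (editor's sketch, not taken from the kernel sources): a typical
 * way to fill out the argument structure with designated initializers.
 * The field values are illustrative; real callers derive them from the
 * transaction reservation and the allocation policy in force.
 *
 *	struct xfs_alloc_arg	args = {
 *		.tp		= tp,
 *		.mp		= tp->t_mountp,
 *		.minlen		= 1,			(accept a short extent)
 *		.maxlen		= len,			(but no more than asked for)
 *		.prod		= 1,			(no extent size hint)
 *		.alignment	= 1,			(no stripe alignment)
 *		.datatype	= XFS_ALLOC_USERDATA,	(see defines below)
 *		.resv		= XFS_AG_RESV_NONE,
 *	};
 */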
/*
 * Defines for datatype
 */
#define XFS_ALLOC_USERDATA		(1 << 0) /* allocation is for user data */
#define XFS_ALLOC_INITIAL_USER_DATA	(1 << 1) /* special case start of file */
#define XFS_ALLOC_NOBUSY		(1 << 2) /* Busy extents not allowed */

/* freespace limit calculations */
unsigned int xfs_alloc_set_aside(struct xfs_mount *mp);
unsigned int xfs_alloc_ag_max_usable(struct xfs_mount *mp);

xfs_extlen_t xfs_alloc_longest_free_extent(struct xfs_perag *pag,
		xfs_extlen_t need, xfs_extlen_t reserved);
unsigned int xfs_alloc_min_freelist(struct xfs_mount *mp,
		struct xfs_perag *pag);
int xfs_alloc_get_freelist(struct xfs_perag *pag, struct xfs_trans *tp,
		struct xfs_buf *agfbp, xfs_agblock_t *bnop, int btreeblk);
int xfs_alloc_put_freelist(struct xfs_perag *pag, struct xfs_trans *tp,
		struct xfs_buf *agfbp, struct xfs_buf *agflbp,
		xfs_agblock_t bno, int btreeblk);

/*
 * Compute and fill in value of m_alloc_maxlevels.
 */
void
xfs_alloc_compute_maxlevels(
	struct xfs_mount	*mp);	/* file system mount structure */

/*
 * Log the given fields from the agf structure.
 */
void
xfs_alloc_log_agf(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_buf		*bp,	/* buffer for a.g. freelist header */
	uint32_t		fields); /* mask of fields to be logged (XFS_AGF_...) */

/*
 * Allocate an extent anywhere in the specific AG given. If there is no
 * space matching the requirements in that AG, then the allocation will fail.
 */
int xfs_alloc_vextent_this_ag(struct xfs_alloc_arg *args, xfs_agnumber_t agno);

/*
 * Allocate an extent as close to the target as possible. If there are no
 * viable candidates in the AG, then fail the allocation.
 */
int xfs_alloc_vextent_near_bno(struct xfs_alloc_arg *args,
		xfs_fsblock_t target);

/*
 * Allocate an extent exactly at the target given. If this is not possible
 * then the allocation fails.
 */
int xfs_alloc_vextent_exact_bno(struct xfs_alloc_arg *args,
		xfs_fsblock_t target);

/*
 * Best effort full filesystem allocation scan.
 *
 * Locality aware allocation will be attempted in the initial AG, but on
 * failure non-localised attempts will be made. The AGs are constrained by
 * previous allocations in the current transaction. Two passes will be made -
 * the first non-blocking, the second blocking.
 */
int xfs_alloc_vextent_start_ag(struct xfs_alloc_arg *args,
		xfs_fsblock_t target);

/*
 * Iterate from the AG indicated by args->fsbno through to the end of the
 * filesystem attempting blocking allocation. This is for use in last
 * resort allocation attempts when everything else has failed.
 */
int xfs_alloc_vextent_first_ag(struct xfs_alloc_arg *args,
xfs_fsblock_t target);
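/*
 * Example (editor's sketch): the common calling pattern for the
 * xfs_alloc_vextent_*() variants above. A variant can return 0 without
 * finding space, in which case args.fsbno is set to NULLFSBLOCK, so both
 * conditions must be checked:
 *
 *	error = xfs_alloc_vextent_near_bno(&args, target);
 *	if (error)
 *		return error;
 *	if (args.fsbno == NULLFSBLOCK)
 *		return -ENOSPC;		(or fall back, e.g. to _start_ag())
 *	(success: the extent starts at args.fsbno and is args.len blocks)
 */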
/*
 * Free an extent.
 */
int				/* error */
__xfs_free_extent(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_perag	*pag,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len,	/* length of extent */
	const struct xfs_owner_info	*oinfo,	/* extent owner */
	enum xfs_ag_resv_type	type,	/* block reservation type */
	bool			skip_discard);

static inline int
xfs_free_extent(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len,
	const struct xfs_owner_info	*oinfo,
	enum xfs_ag_resv_type	type)
{
	return __xfs_free_extent(tp, pag, agbno, len, oinfo, type, false);
}
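/*
 * Example (editor's sketch): freeing an AG-relative extent directly in the
 * current transaction. The owner info here is chosen for illustration;
 * callers normally pass the owner recorded in the rmap btree. Most code
 * defers the free via xfs_free_extent_later() below instead, so the blocks
 * flow through the EFI/EFD and busy extent machinery before reuse.
 *
 *	error = xfs_free_extent(tp, pag, agbno, len,
 *			&XFS_RMAP_OINFO_ANY_OWNER, XFS_AG_RESV_NONE);
 */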
int				/* error */
xfs_alloc_lookup_le(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat);	/* success/failure */

int				/* error */
xfs_alloc_lookup_ge(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat);	/* success/failure */

int				/* error */
xfs_alloc_get_rec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		*bno,	/* output: starting block of extent */
	xfs_extlen_t		*len,	/* output: length of extent */
	int			*stat);	/* output: success/failure */

union xfs_btree_rec;
void xfs_alloc_btrec_to_irec(const union xfs_btree_rec *rec,
		struct xfs_alloc_rec_incore *irec);
xfs_failaddr_t xfs_alloc_check_irec(struct xfs_perag *pag,
		const struct xfs_alloc_rec_incore *irec);

int xfs_read_agf(struct xfs_perag *pag, struct xfs_trans *tp, int flags,
		struct xfs_buf **agfbpp);
int xfs_alloc_read_agf(struct xfs_perag *pag, struct xfs_trans *tp, int flags,
		struct xfs_buf **agfbpp);
int xfs_alloc_read_agfl(struct xfs_perag *pag, struct xfs_trans *tp,
		struct xfs_buf **bpp);
int xfs_free_agfl_block(struct xfs_trans *, xfs_agnumber_t, xfs_agblock_t,
		struct xfs_buf *, struct xfs_owner_info *);
int xfs_alloc_fix_freelist(struct xfs_alloc_arg *args, uint32_t alloc_flags);
int xfs_free_extent_fix_freelist(struct xfs_trans *tp, struct xfs_perag *pag,
		struct xfs_buf **agbp);

xfs_extlen_t xfs_prealloc_blocks(struct xfs_mount *mp);

typedef int (*xfs_alloc_query_range_fn)(
	struct xfs_btree_cur			*cur,
	const struct xfs_alloc_rec_incore	*rec,
	void					*priv);

int xfs_alloc_query_range(struct xfs_btree_cur *cur,
		const struct xfs_alloc_rec_incore *low_rec,
		const struct xfs_alloc_rec_incore *high_rec,
		xfs_alloc_query_range_fn fn, void *priv);
int xfs_alloc_query_all(struct xfs_btree_cur *cur, xfs_alloc_query_range_fn fn,
void *priv);
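/*
 * Example (editor's sketch): a callback matching xfs_alloc_query_range_fn.
 * It is invoked once per free space record; returning a nonzero value (an
 * error or an abort code) ends the walk early. The function and variable
 * names are hypothetical.
 *
 *	static int
 *	xfs_example_count_frecs(struct xfs_btree_cur *cur,
 *			const struct xfs_alloc_rec_incore *rec, void *priv)
 *	{
 *		uint64_t	*nrecs = priv;
 *
 *		(*nrecs)++;
 *		return 0;
 *	}
 *
 *	error = xfs_alloc_query_all(cur, xfs_example_count_frecs, &nrecs);
 */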
int xfs_alloc_has_records(struct xfs_btree_cur *cur, xfs_agblock_t bno,
		xfs_extlen_t len, enum xbtree_recpacking *outcome);

typedef int (*xfs_agfl_walk_fn)(struct xfs_mount *mp, xfs_agblock_t bno,
		void *priv);
int xfs_agfl_walk(struct xfs_mount *mp, struct xfs_agf *agf,
struct xfs_buf *agflbp, xfs_agfl_walk_fn walk_fn, void *priv);
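/*
 * Example (editor's sketch): xfs_agfl_walk() calls the given function once
 * for each block currently on the AGFL. A minimal, hypothetical callback
 * that bounds-checks every entry might look like:
 *
 *	static int
 *	xfs_example_agfl_check(struct xfs_mount *mp, xfs_agblock_t bno,
 *			void *priv)
 *	{
 *		if (bno >= mp->m_sb.sb_agblocks)	(outside the AG)
 *			return -EFSCORRUPTED;
 *		return 0;
 *	}
 */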
/*
 * Return a pointer to the start of the AGFL block number array in an AGFL
 * buffer; v5 (CRC-enabled) filesystems place a struct xfs_agfl header in
 * front of the array.
 */
static inline __be32 *
xfs_buf_to_agfl_bno(
	struct xfs_buf		*bp)
{
	if (xfs_has_crc(bp->b_mount))
		return bp->b_addr + sizeof(struct xfs_agfl);
	return bp->b_addr;
}

int xfs_free_extent_later(struct xfs_trans *tp, xfs_fsblock_t bno,
		xfs_filblks_t len, const struct xfs_owner_info *oinfo,
enum xfs_ag_resv_type type, bool skip_discard);
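/*
 * Example (editor's sketch): deferring a free instead of calling
 * xfs_free_extent() directly. This logs an extent free intent (EFI) and
 * performs the actual free when the deferred operations are finished:
 *
 *	error = xfs_free_extent_later(tp, fsbno, len, &oinfo,
 *			XFS_AG_RESV_NONE, false);	(false: allow discard)
 */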
/*
 * List of extents to be freed "later".
 * The list is kept sorted on xefi_startblock.
 */
struct xfs_extent_free_item {
	struct list_head	xefi_list;
	uint64_t		xefi_owner;
	xfs_fsblock_t		xefi_startblock; /* starting fs block number */
	xfs_extlen_t		xefi_blockcount; /* number of blocks in extent */
	struct xfs_perag	*xefi_pag;
	unsigned int		xefi_flags;
	enum xfs_ag_resv_type	xefi_agresv;
};

void xfs_extent_free_get_group(struct xfs_mount *mp,
		struct xfs_extent_free_item *xefi);

#define XFS_EFI_SKIP_DISCARD	(1U << 0) /* don't issue discard */
#define XFS_EFI_ATTR_FORK	(1U << 1) /* freeing attr fork block */
#define XFS_EFI_BMBT_BLOCK	(1U << 2) /* freeing bmap btree block */
#define XFS_EFI_CANCELLED	(1U << 3) /* don't actually free the space */

/*
 * Automatic reaping of freshly allocated, still-unwritten space: a paused
 * extent free intent (EFI) is logged at allocation time so that the blocks
 * are freed again if we crash before the new contents are committed.
 */
struct xfs_alloc_autoreap {
	struct xfs_defer_pending	*dfp;
};

int xfs_alloc_schedule_autoreap(const struct xfs_alloc_arg *args,
		bool skip_discard, struct xfs_alloc_autoreap *aarp);
void xfs_alloc_cancel_autoreap(struct xfs_trans *tp,
		struct xfs_alloc_autoreap *aarp);
void xfs_alloc_commit_autoreap(struct xfs_trans *tp,
struct xfs_alloc_autoreap *aarp);
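/*
 * Example (editor's sketch of the lifecycle implied by the functions
 * above): schedule autoreap right after allocating space for a new
 * structure, then resolve it once the new contents are written. Note the
 * asymmetry: _cancel_autoreap() keeps the space, while _commit_autoreap()
 * unpauses the intent so the space is freed again.
 *
 *	error = xfs_alloc_schedule_autoreap(&args, true, &autoreap);
 *	(write the new structure to the space at args.fsbno)
 *	if (the writes succeeded)
 *		xfs_alloc_cancel_autoreap(tp, &autoreap);
 *	else
 *		xfs_alloc_commit_autoreap(tp, &autoreap);
 */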
extern struct kmem_cache	*xfs_extfree_item_cache;

int __init xfs_extfree_intent_init_cache(void);
void xfs_extfree_intent_destroy_cache(void);

xfs_failaddr_t xfs_validate_ag_length(struct xfs_buf *bp, uint32_t seqno,
		uint32_t length);

#endif	/* __XFS_ALLOC_H__ */