xfs: online repair of file fork mappings [v28.3]

In this series, online repair gains the ability to rebuild data and attr
 fork mappings from the reverse mapping information.  It is at this point
 where we reintroduce the ability to reap file extents.
 
 Repair of CoW forks is a little different -- on disk, CoW staging
 extents are owned by the refcount btree and cannot be mapped back to
 individual files.  Hence we can only detect staging extents that don't
 quite look right (missing reverse mappings, shared staging extents) and
 replace them with fresh allocations.
 
 This has been running on the djcloud for months with no problems.  Enjoy!
 
 Signed-off-by: Darrick J. Wong <djwong@kernel.org>
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQQ2qTKExjcn+O1o2YRKO3ySh0YRpgUCZXzKAwAKCRBKO3ySh0YR
 prEyAP95FTXnn0pe08gcyzICmcnMM762VNleDRawhAj+LU2J4AEAj4eOC5AelmYo
 MHwdRYJzug539fliCF8m6a+id+aeCQo=
 =cONL
 -----END PGP SIGNATURE-----

Merge tag 'repair-file-mappings-6.8_2023-12-15' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux into xfs-6.8-mergeB

xfs: online repair of file fork mappings

In this series, online repair gains the ability to rebuild data and attr
fork mappings from the reverse mapping information.  It is at this point
where we reintroduce the ability to reap file extents.

Repair of CoW forks is a little different -- on disk, CoW staging
extents are owned by the refcount btree and cannot be mapped back to
individual files.  Hence we can only detect staging extents that don't
quite look right (missing reverse mappings, shared staging extents) and
replace them with fresh allocations.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>

* tag 'repair-file-mappings-6.8_2023-12-15' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux:
  xfs: repair problems in CoW forks
  xfs: create a ranged query function for refcount btrees
  xfs: refactor repair forcing tests into a repair.c helper
  xfs: repair inode fork block mapping data structures
  xfs: reintroduce reaping of file metadata blocks to xrep_reap_extents
This commit is contained in:
Chandan Babu R 2023-12-16 08:44:55 +05:30
commit 98e63b91cd
24 changed files with 2160 additions and 52 deletions

View File

@ -183,6 +183,8 @@ ifeq ($(CONFIG_XFS_ONLINE_REPAIR),y)
xfs-y += $(addprefix scrub/, \
agheader_repair.o \
alloc_repair.o \
bmap_repair.o \
cow_repair.o \
ialloc_repair.o \
inode_repair.o \
newbt.o \

View File

@ -15,6 +15,7 @@
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
@ -288,10 +289,7 @@ xfs_bmbt_get_minrecs(
int level)
{
if (level == cur->bc_nlevels - 1) {
struct xfs_ifork *ifp;
ifp = xfs_ifork_ptr(cur->bc_ino.ip,
cur->bc_ino.whichfork);
struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
return xfs_bmbt_maxrecs(cur->bc_mp,
ifp->if_broot_bytes, level == 0) / 2;
@ -306,10 +304,7 @@ xfs_bmbt_get_maxrecs(
int level)
{
if (level == cur->bc_nlevels - 1) {
struct xfs_ifork *ifp;
ifp = xfs_ifork_ptr(cur->bc_ino.ip,
cur->bc_ino.whichfork);
struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
return xfs_bmbt_maxrecs(cur->bc_mp,
ifp->if_broot_bytes, level == 0);
@ -543,23 +538,19 @@ static const struct xfs_btree_ops xfs_bmbt_ops = {
.keys_contiguous = xfs_bmbt_keys_contiguous,
};
/*
* Allocate a new bmap btree cursor.
*/
struct xfs_btree_cur * /* new bmap btree cursor */
xfs_bmbt_init_cursor(
struct xfs_mount *mp, /* file system mount point */
struct xfs_trans *tp, /* transaction pointer */
struct xfs_inode *ip, /* inode owning the btree */
int whichfork) /* data or attr fork */
static struct xfs_btree_cur *
xfs_bmbt_init_common(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_inode *ip,
int whichfork)
{
struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_btree_cur *cur;
ASSERT(whichfork != XFS_COW_FORK);
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP,
mp->m_bm_maxlevels[whichfork], xfs_bmbt_cur_cache);
cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);
cur->bc_ops = &xfs_bmbt_ops;
@ -567,10 +558,30 @@ xfs_bmbt_init_cursor(
if (xfs_has_crc(mp))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
cur->bc_ino.forksize = xfs_inode_fork_size(ip, whichfork);
cur->bc_ino.ip = ip;
cur->bc_ino.allocated = 0;
cur->bc_ino.flags = 0;
return cur;
}
/*
* Allocate a new bmap btree cursor.
*/
struct xfs_btree_cur *
xfs_bmbt_init_cursor(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_inode *ip,
int whichfork)
{
struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_btree_cur *cur;
cur = xfs_bmbt_init_common(mp, tp, ip, whichfork);
cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
cur->bc_ino.forksize = xfs_inode_fork_size(ip, whichfork);
cur->bc_ino.whichfork = whichfork;
return cur;
@ -587,6 +598,76 @@ xfs_bmbt_block_maxrecs(
return blocklen / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t));
}
/*
 * Allocate a new bmap btree cursor for reloading an inode block mapping data
 * structure. Note that callers can use the staged cursor to reload extents
 * format inode forks if they rebuild the iext tree and commit the staged
 * cursor immediately.
 */
struct xfs_btree_cur *
xfs_bmbt_stage_cursor(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	struct xbtree_ifakeroot	*ifake)
{
	struct xfs_btree_cur	*cur;
	struct xfs_btree_ops	*ops;

	/* data fork always has larger maxheight */
	cur = xfs_bmbt_init_common(mp, NULL, ip, XFS_DATA_FORK);
	cur->bc_nlevels = ifake->if_levels;
	cur->bc_ino.forksize = ifake->if_fork_size;

	/* Don't let anyone think we're attached to the real fork yet. */
	cur->bc_ino.whichfork = -1;
	xfs_btree_stage_ifakeroot(cur, ifake, &ops);
	/*
	 * NOTE(review): the staged cursor's update_cursor callback is cleared
	 * here; presumably the fake-root bulk loading path never needs to
	 * update a companion cursor -- confirm against xfs_btree_staging.c.
	 */
	ops->update_cursor = NULL;
	return cur;
}
/*
 * Swap in the new inode fork root.  Once we pass this point the newly rebuilt
 * mappings are in place and we have to kill off any old btree blocks.
 *
 * The caller supplies the transaction @tp and the fork @whichfork that the
 * staged contents should be committed to; the cursor must have been created
 * by xfs_bmbt_stage_cursor and bulk-loaded beforehand.
 */
void
xfs_bmbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	int			whichfork)
{
	struct xbtree_ifakeroot	*ifake = cur->bc_ino.ifake;
	struct xfs_ifork	*ifp;
	/* Per-fork logging flags, indexed by XFS_DATA_FORK/XFS_ATTR_FORK. */
	static const short	brootflag[2] = {XFS_ILOG_DBROOT, XFS_ILOG_ABROOT};
	static const short	extflag[2] = {XFS_ILOG_DEXT, XFS_ILOG_AEXT};
	int			flags = XFS_ILOG_CORE;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
	ASSERT(whichfork != XFS_COW_FORK);

	/*
	 * Free any resources hanging off the real fork, then shallow-copy the
	 * staging fork's contents into the real fork to transfer everything
	 * we just built.
	 */
	ifp = xfs_ifork_ptr(cur->bc_ino.ip, whichfork);
	xfs_idestroy_fork(ifp);
	memcpy(ifp, ifake->if_fork, sizeof(struct xfs_ifork));

	/* Log the fork in whichever format the rebuild produced. */
	switch (ifp->if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		flags |= extflag[whichfork];
		break;
	case XFS_DINODE_FMT_BTREE:
		flags |= brootflag[whichfork];
		break;
	default:
		ASSERT(0);
		break;
	}
	xfs_trans_log_inode(tp, cur->bc_ino.ip, flags);
	xfs_btree_commit_ifakeroot(cur, tp, whichfork, &xfs_bmbt_ops);
}
/*
* Calculate number of records in a bmap btree block.
*/

View File

@ -11,6 +11,7 @@ struct xfs_btree_block;
struct xfs_mount;
struct xfs_inode;
struct xfs_trans;
struct xbtree_ifakeroot;
/*
* Btree block header size depends on a superblock flag.
@ -106,6 +107,10 @@ extern int xfs_bmbt_change_owner(struct xfs_trans *tp, struct xfs_inode *ip,
extern struct xfs_btree_cur *xfs_bmbt_init_cursor(struct xfs_mount *,
struct xfs_trans *, struct xfs_inode *, int);
struct xfs_btree_cur *xfs_bmbt_stage_cursor(struct xfs_mount *mp,
struct xfs_inode *ip, struct xbtree_ifakeroot *ifake);
void xfs_bmbt_commit_staged_btree(struct xfs_btree_cur *cur,
struct xfs_trans *tp, int whichfork);
extern unsigned long long xfs_bmbt_calc_size(struct xfs_mount *mp,
unsigned long long len);

View File

@ -405,7 +405,7 @@ xfs_btree_bload_prep_block(
ASSERT(*bpp == NULL);
/* Allocate a new incore btree root block. */
new_size = bbl->iroot_size(cur, nr_this_block, priv);
new_size = bbl->iroot_size(cur, level, nr_this_block, priv);
ifp->if_broot = kmem_zalloc(new_size, 0);
ifp->if_broot_bytes = (int)new_size;
@ -596,7 +596,14 @@ xfs_btree_bload_level_geometry(
unsigned int desired_npb;
unsigned int maxnr;
maxnr = cur->bc_ops->get_maxrecs(cur, level);
/*
* Compute the absolute maximum number of records that we can store in
* the ondisk block or inode root.
*/
if (cur->bc_ops->get_dmaxrecs)
maxnr = cur->bc_ops->get_dmaxrecs(cur, level);
else
maxnr = cur->bc_ops->get_maxrecs(cur, level);
/*
* Compute the number of blocks we need to fill each block with the

View File

@ -53,7 +53,7 @@ typedef int (*xfs_btree_bload_get_records_fn)(struct xfs_btree_cur *cur,
typedef int (*xfs_btree_bload_claim_block_fn)(struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr, void *priv);
typedef size_t (*xfs_btree_bload_iroot_size_fn)(struct xfs_btree_cur *cur,
unsigned int nr_this_level, void *priv);
unsigned int level, unsigned int nr_this_level, void *priv);
struct xfs_btree_bload {
/*

View File

@ -622,13 +622,11 @@ static inline void xfs_iext_inc_seq(struct xfs_ifork *ifp)
}
void
xfs_iext_insert(
struct xfs_inode *ip,
xfs_iext_insert_raw(
struct xfs_ifork *ifp,
struct xfs_iext_cursor *cur,
struct xfs_bmbt_irec *irec,
int state)
struct xfs_bmbt_irec *irec)
{
struct xfs_ifork *ifp = xfs_iext_state_to_fork(ip, state);
xfs_fileoff_t offset = irec->br_startoff;
struct xfs_iext_leaf *new = NULL;
int nr_entries, i;
@ -662,12 +660,23 @@ xfs_iext_insert(
xfs_iext_set(cur_rec(cur), irec);
ifp->if_bytes += sizeof(struct xfs_iext_rec);
trace_xfs_iext_insert(ip, cur, state, _RET_IP_);
if (new)
xfs_iext_insert_node(ifp, xfs_iext_leaf_key(new, 0), new, 2);
}
/*
 * Insert @irec into the fork selected by @state, then emit a trace point.
 * Thin wrapper around xfs_iext_insert_raw that resolves the ifork from the
 * inode and bmap state flags.
 */
void
xfs_iext_insert(
	struct xfs_inode	*ip,
	struct xfs_iext_cursor	*cur,
	struct xfs_bmbt_irec	*irec,
	int			state)
{
	struct xfs_ifork	*ifp = xfs_iext_state_to_fork(ip, state);

	xfs_iext_insert_raw(ifp, cur, irec);
	trace_xfs_iext_insert(ip, cur, state, _RET_IP_);
}
static struct xfs_iext_node *
xfs_iext_rebalance_node(
struct xfs_iext_node *parent,

View File

@ -520,6 +520,7 @@ xfs_idata_realloc(
ifp->if_bytes = new_size;
}
/* Free all memory and reset a fork back to its initial state. */
void
xfs_idestroy_fork(
struct xfs_ifork *ifp)

View File

@ -180,6 +180,9 @@ void xfs_init_local_fork(struct xfs_inode *ip, int whichfork,
const void *data, int64_t size);
xfs_extnum_t xfs_iext_count(struct xfs_ifork *ifp);
void xfs_iext_insert_raw(struct xfs_ifork *ifp,
struct xfs_iext_cursor *cur,
struct xfs_bmbt_irec *irec);
void xfs_iext_insert(struct xfs_inode *, struct xfs_iext_cursor *cur,
struct xfs_bmbt_irec *, int);
void xfs_iext_remove(struct xfs_inode *, struct xfs_iext_cursor *,

View File

@ -2031,6 +2031,47 @@ xfs_refcount_has_records(
return xfs_btree_has_records(cur, &low, &high, NULL, outcome);
}
/* Closure passed through xfs_btree_query_range to the caller's callback. */
struct xfs_refcount_query_range_info {
	xfs_refcount_query_range_fn	fn;	/* per-record callback */
	void				*priv;	/* caller's private data */
};
/*
 * Format btree record and pass to our callback.
 *
 * Converts the ondisk record to incore form, validates it, and complains
 * (returning a corruption error) if the record is bad before handing it to
 * the caller's query function.
 */
STATIC int
xfs_refcount_query_range_helper(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*rec,
	void				*priv)
{
	struct xfs_refcount_query_range_info	*query = priv;
	struct xfs_refcount_irec		irec;
	xfs_failaddr_t				fa;

	xfs_refcount_btrec_to_irec(rec, &irec);
	fa = xfs_refcount_check_irec(cur->bc_ag.pag, &irec);
	if (fa)
		return xfs_refcount_complain_bad_rec(cur, fa, &irec);

	return query->fn(cur, &irec, query->priv);
}
/*
 * Find all refcount records between two keys.
 *
 * Invokes @fn once per validated record in [@low_rec, @high_rec]; @priv is
 * passed through to the callback unchanged.  Returns zero, a negative errno,
 * or whatever nonzero value the callback uses to stop the iteration.
 */
int
xfs_refcount_query_range(
	struct xfs_btree_cur		*cur,
	const struct xfs_refcount_irec	*low_rec,
	const struct xfs_refcount_irec	*high_rec,
	xfs_refcount_query_range_fn	fn,
	void				*priv)
{
	union xfs_btree_irec		low_brec = { .rc = *low_rec };
	union xfs_btree_irec		high_brec = { .rc = *high_rec };
	struct xfs_refcount_query_range_info	query = { .priv = priv, .fn = fn };

	return xfs_btree_query_range(cur, &low_brec, &high_brec,
			xfs_refcount_query_range_helper, &query);
}
int __init
xfs_refcount_intent_init_cache(void)
{

View File

@ -127,4 +127,14 @@ extern struct kmem_cache *xfs_refcount_intent_cache;
int __init xfs_refcount_intent_init_cache(void);
void xfs_refcount_intent_destroy_cache(void);
typedef int (*xfs_refcount_query_range_fn)(
struct xfs_btree_cur *cur,
const struct xfs_refcount_irec *rec,
void *priv);
int xfs_refcount_query_range(struct xfs_btree_cur *cur,
const struct xfs_refcount_irec *low_rec,
const struct xfs_refcount_irec *high_rec,
xfs_refcount_query_range_fn fn, void *priv);
#endif /* __XFS_REFCOUNT_H__ */

View File

@ -50,9 +50,18 @@ xchk_setup_inode_bmap(
if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
sc->sm->sm_type != XFS_SCRUB_TYPE_BMBTA) {
struct address_space *mapping = VFS_I(sc->ip)->i_mapping;
bool is_repair = xchk_could_repair(sc);
xchk_ilock(sc, XFS_MMAPLOCK_EXCL);
/* Break all our leases, we're going to mess with things. */
if (is_repair) {
error = xfs_break_layouts(VFS_I(sc->ip),
&sc->ilock_flags, BREAK_WRITE);
if (error)
goto out;
}
inode_dio_wait(VFS_I(sc->ip));
/*
@ -73,6 +82,15 @@ xchk_setup_inode_bmap(
error = filemap_fdatawait_keep_errors(mapping);
if (error && (error != -ENOSPC && error != -EIO))
goto out;
/* Drop the page cache if we're repairing block mappings. */
if (is_repair) {
error = invalidate_inode_pages2(
VFS_I(sc->ip)->i_mapping);
if (error)
goto out;
}
}
/* Got the inode, lock it and we're ready to go. */

858
fs/xfs/scrub/bmap_repair.c Normal file
View File

@ -0,0 +1,858 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2018-2023 Oracle. All Rights Reserved.
* Author: Darrick J. Wong <djwong@kernel.org>
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_inode_fork.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount.h"
#include "xfs_quota.h"
#include "xfs_ialloc.h"
#include "xfs_ag.h"
#include "xfs_reflink.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/bitmap.h"
#include "scrub/fsb_bitmap.h"
#include "scrub/xfile.h"
#include "scrub/xfarray.h"
#include "scrub/newbt.h"
#include "scrub/reap.h"
/*
* Inode Fork Block Mapping (BMBT) Repair
* ======================================
*
* Gather all the rmap records for the inode and fork we're fixing, reset the
* incore fork, then recreate the btree.
*/
/* Tri-state tracking whether the repaired file needs the REFLINK iflag. */
enum reflink_scan_state {
	RLS_IRRELEVANT = -1,	/* not applicable to this file */
	RLS_UNKNOWN,		/* shared extent scans required */
	RLS_SET_IFLAG,		/* iflag must be set */
};
/* In-memory state for a single fork-mapping repair run. */
struct xrep_bmap {
	/* Old bmbt blocks */
	struct xfsb_bitmap	old_bmbt_blocks;

	/* New fork. */
	struct xrep_newbt	new_bmapbt;

	/* List of new bmap records. */
	struct xfarray		*bmap_records;

	struct xfs_scrub	*sc;

	/* How many blocks did we find allocated to this file? */
	xfs_rfsblock_t		nblocks;

	/* How many bmbt blocks did we find for this fork? */
	xfs_rfsblock_t		old_bmbt_block_count;

	/* get_records()'s position in the free space record array. */
	xfarray_idx_t		array_cur;

	/* How many real (non-hole, non-delalloc) mappings do we have? */
	uint64_t		real_mappings;

	/* Which fork are we fixing? */
	int			whichfork;

	/* Should the REFLINK flag be set when the repair is over? */
	enum reflink_scan_state	reflink_scan;
};
/* Is this space extent shared?  Flag the inode if it is. */
STATIC int
xrep_bmap_discover_shared(
	struct xrep_bmap	*rb,
	xfs_fsblock_t		startblock,
	xfs_filblks_t		blockcount)
{
	struct xfs_scrub	*sc = rb->sc;
	xfs_agblock_t		agbno;
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	int			error;

	agbno = XFS_FSB_TO_AGBNO(sc->mp, startblock);
	error = xfs_refcount_find_shared(sc->sa.refc_cur, agbno, blockcount,
			&fbno, &flen, false);
	if (error)
		return error;

	/* Any shared subrange means the inode must carry the REFLINK flag. */
	if (fbno != NULLAGBLOCK)
		rb->reflink_scan = RLS_SET_IFLAG;

	return 0;
}
/* Remember this reverse-mapping as a series of bmap records. */
STATIC int
xrep_bmap_from_rmap(
	struct xrep_bmap	*rb,
	xfs_fileoff_t		startoff,
	xfs_fsblock_t		startblock,
	xfs_filblks_t		blockcount,
	bool			unwritten)
{
	struct xfs_bmbt_irec	irec = {
		.br_startoff	= startoff,
		.br_startblock	= startblock,
		.br_state	= unwritten ? XFS_EXT_UNWRITTEN : XFS_EXT_NORM,
	};
	struct xfs_bmbt_rec	rbe;
	struct xfs_scrub	*sc = rb->sc;
	int			error = 0;

	/*
	 * If we're repairing the data fork of a non-reflinked regular file on
	 * a reflink filesystem, we need to figure out if this space extent is
	 * shared.
	 */
	if (rb->reflink_scan == RLS_UNKNOWN && !unwritten) {
		error = xrep_bmap_discover_shared(rb, startblock, blockcount);
		if (error)
			return error;
	}

	/* An rmap can be longer than one bmbt mapping; split as needed. */
	do {
		xfs_failaddr_t	fa;

		irec.br_blockcount = min_t(xfs_filblks_t, blockcount,
				XFS_MAX_BMBT_EXTLEN);

		fa = xfs_bmap_validate_extent(sc->ip, rb->whichfork, &irec);
		if (fa)
			return -EFSCORRUPTED;

		xfs_bmbt_disk_set_all(&rbe, &irec);

		trace_xrep_bmap_found(sc->ip, rb->whichfork, &irec);

		if (xchk_should_terminate(sc, &error))
			return error;

		error = xfarray_append(rb->bmap_records, &rbe);
		if (error)
			return error;

		rb->real_mappings++;

		irec.br_startblock += irec.br_blockcount;
		irec.br_startoff += irec.br_blockcount;
		blockcount -= irec.br_blockcount;
	} while (blockcount > 0);

	return 0;
}
/*
 * Check for any obvious errors or conflicts in the file mapping.  Returns
 * -EFSCORRUPTED if the rmap record cannot possibly describe this fork, or a
 * negative errno from the btree queries.
 */
STATIC int
xrep_bmap_check_fork_rmap(
	struct xrep_bmap	*rb,
	struct xfs_btree_cur	*cur,
	const struct xfs_rmap_irec *rec)
{
	struct xfs_scrub	*sc = rb->sc;
	enum xbtree_recpacking	outcome;
	int			error;

	/*
	 * Data extents for rt files are never stored on the data device, but
	 * everything else (xattrs, bmbt blocks) can be.
	 */
	if (XFS_IS_REALTIME_INODE(sc->ip) &&
	    !(rec->rm_flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK)))
		return -EFSCORRUPTED;

	/* Check that this is within the AG. */
	if (!xfs_verify_agbext(cur->bc_ag.pag, rec->rm_startblock,
				rec->rm_blockcount))
		return -EFSCORRUPTED;

	/* Check the file offset range. */
	if (!(rec->rm_flags & XFS_RMAP_BMBT_BLOCK) &&
	    !xfs_verify_fileext(sc->mp, rec->rm_offset, rec->rm_blockcount))
		return -EFSCORRUPTED;

	/* No contradictory flags. */
	if ((rec->rm_flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK)) &&
	    (rec->rm_flags & XFS_RMAP_UNWRITTEN))
		return -EFSCORRUPTED;

	/* Make sure this isn't free space. */
	error = xfs_alloc_has_records(sc->sa.bno_cur, rec->rm_startblock,
			rec->rm_blockcount, &outcome);
	if (error)
		return error;
	if (outcome != XBTREE_RECPACKING_EMPTY)
		return -EFSCORRUPTED;

	/* Must not be an inode chunk. */
	error = xfs_ialloc_has_inodes_at_extent(sc->sa.ino_cur,
			rec->rm_startblock, rec->rm_blockcount, &outcome);
	if (error)
		return error;
	if (outcome != XBTREE_RECPACKING_EMPTY)
		return -EFSCORRUPTED;

	return 0;
}
/* Record extents that belong to this inode's fork. */
STATIC int
xrep_bmap_walk_rmap(
	struct xfs_btree_cur	*cur,
	const struct xfs_rmap_irec *rec,
	void			*priv)
{
	struct xrep_bmap	*rb = priv;
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_fsblock_t		fsbno;
	int			error = 0;

	if (xchk_should_terminate(rb->sc, &error))
		return error;

	/* Skip rmaps belonging to other inodes. */
	if (rec->rm_owner != rb->sc->ip->i_ino)
		return 0;

	error = xrep_bmap_check_fork_rmap(rb, cur, rec);
	if (error)
		return error;

	/*
	 * Record all blocks allocated to this file even if the extent isn't
	 * for the fork we're rebuilding so that we can reset di_nblocks later.
	 */
	rb->nblocks += rec->rm_blockcount;

	/* If this rmap isn't for the fork we want, we're done. */
	if (rb->whichfork == XFS_DATA_FORK &&
	    (rec->rm_flags & XFS_RMAP_ATTR_FORK))
		return 0;
	if (rb->whichfork == XFS_ATTR_FORK &&
	    !(rec->rm_flags & XFS_RMAP_ATTR_FORK))
		return 0;

	fsbno = XFS_AGB_TO_FSB(mp, cur->bc_ag.pag->pag_agno,
			rec->rm_startblock);

	if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK) {
		/* Old bmbt blocks are reaped after the new fork goes live. */
		rb->old_bmbt_block_count += rec->rm_blockcount;
		return xfsb_bitmap_set(&rb->old_bmbt_blocks, fsbno,
				rec->rm_blockcount);
	}

	return xrep_bmap_from_rmap(rb, rec->rm_offset, fsbno,
			rec->rm_blockcount,
			rec->rm_flags & XFS_RMAP_UNWRITTEN);
}
/*
 * Comparator for sorting block mapping records into increasing file offset
 * order.  Returns the conventional <0 / 0 / >0 comparison result.
 */
static int
xrep_bmap_extent_cmp(
	const void		*a,
	const void		*b)
{
	const struct xfs_bmbt_rec *rec1 = a;
	const struct xfs_bmbt_rec *rec2 = b;
	xfs_fileoff_t		off1 = xfs_bmbt_disk_get_startoff(rec1);
	xfs_fileoff_t		off2 = xfs_bmbt_disk_get_startoff(rec2);

	if (off1 < off2)
		return -1;
	if (off1 > off2)
		return 1;
	return 0;
}
/*
 * Sort the bmap extents by fork offset or else the records will be in the
 * wrong order.  Ensure there are no overlaps in the file offset ranges.
 */
STATIC int
xrep_bmap_sort_records(
	struct xrep_bmap	*rb)
{
	struct xfs_bmbt_irec	irec;
	xfs_fileoff_t		next_off = 0;
	xfarray_idx_t		array_cur;
	int			error;

	error = xfarray_sort(rb->bmap_records, xrep_bmap_extent_cmp,
			XFARRAY_SORT_KILLABLE);
	if (error)
		return error;

	/* Overlapping file ranges mean the collected records are bad. */
	foreach_xfarray_idx(rb->bmap_records, array_cur) {
		struct xfs_bmbt_rec	rec;

		if (xchk_should_terminate(rb->sc, &error))
			return error;

		error = xfarray_load(rb->bmap_records, array_cur, &rec);
		if (error)
			return error;

		xfs_bmbt_disk_get_all(&rec, &irec);
		if (irec.br_startoff < next_off)
			return -EFSCORRUPTED;
		next_off = irec.br_startoff + irec.br_blockcount;
	}

	return 0;
}
/* Scan one AG for reverse mappings that we can turn into extent maps. */
STATIC int
xrep_bmap_scan_ag(
	struct xrep_bmap	*rb,
	struct xfs_perag	*pag)
{
	struct xfs_scrub	*sc = rb->sc;
	int			error;

	/* Set up the AG headers and btree cursors for the rmap query. */
	error = xrep_ag_init(sc, pag, &sc->sa);
	if (error)
		return error;

	error = xfs_rmap_query_all(sc->sa.rmap_cur, xrep_bmap_walk_rmap, rb);
	/* Always release the AG state, even if the query failed. */
	xchk_ag_free(sc, &sc->sa);
	return error;
}
/* Find the delalloc extents from the old incore extent tree. */
STATIC int
xrep_bmap_find_delalloc(
	struct xrep_bmap	*rb)
{
	struct xfs_bmbt_irec	irec;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_rec	rbe;
	struct xfs_inode	*ip = rb->sc->ip;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, rb->whichfork);
	int			error = 0;

	/*
	 * Skip this scan if we don't expect to find delayed allocation
	 * reservations in this fork.
	 */
	if (rb->whichfork == XFS_ATTR_FORK || ip->i_delayed_blks == 0)
		return 0;

	for_each_xfs_iext(ifp, &icur, &irec) {
		/* Delalloc reservations are flagged by a null startblock. */
		if (!isnullstartblock(irec.br_startblock))
			continue;

		xfs_bmbt_disk_set_all(&rbe, &irec);

		trace_xrep_bmap_found(ip, rb->whichfork, &irec);

		if (xchk_should_terminate(rb->sc, &error))
			return error;

		error = xfarray_append(rb->bmap_records, &rbe);
		if (error)
			return error;
	}

	return 0;
}
/*
 * Collect block mappings for this fork of this inode and decide if we have
 * enough space to rebuild.  Caller is responsible for cleaning up the list if
 * anything goes wrong.
 */
STATIC int
xrep_bmap_find_mappings(
	struct xrep_bmap	*rb)
{
	struct xfs_scrub	*sc = rb->sc;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	int			error = 0;

	/* Iterate the rmaps for extents. */
	for_each_perag(sc->mp, agno, pag) {
		error = xrep_bmap_scan_ag(rb, pag);
		if (error) {
			/* Drop the perag reference before bailing out. */
			xfs_perag_rele(pag);
			return error;
		}
	}

	/* Delalloc reservations exist only incore; gather them separately. */
	return xrep_bmap_find_delalloc(rb);
}
/* Retrieve real extent mappings for bulk loading the bmap btree. */
STATIC int
xrep_bmap_get_records(
	struct xfs_btree_cur	*cur,
	unsigned int		idx,
	struct xfs_btree_block	*block,
	unsigned int		nr_wanted,
	void			*priv)
{
	struct xfs_bmbt_rec	rec;
	struct xfs_bmbt_irec	*irec = &cur->bc_rec.b;
	struct xrep_bmap	*rb = priv;
	union xfs_btree_rec	*block_rec;
	unsigned int		loaded;
	int			error;

	for (loaded = 0; loaded < nr_wanted; loaded++, idx++) {
		/* Skip delalloc records; only real mappings go ondisk. */
		do {
			error = xfarray_load(rb->bmap_records, rb->array_cur++,
					&rec);
			if (error)
				return error;

			xfs_bmbt_disk_get_all(&rec, irec);
		} while (isnullstartblock(irec->br_startblock));

		block_rec = xfs_btree_rec_addr(cur, idx, block);
		cur->bc_ops->init_rec_from_cur(cur, block_rec);
	}

	return loaded;
}
/* Feed one of the new btree blocks to the bulk loader. */
STATIC int
xrep_bmap_claim_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	void			*priv)
{
	struct xrep_bmap	*rb = priv;

	/* Hand the loader one of the blocks reserved for the new btree. */
	return xrep_newbt_claim_block(cur, &rb->new_bmapbt, ptr);
}
/* Figure out how much space we need to create the incore btree root block. */
STATIC size_t
xrep_bmap_iroot_size(
	struct xfs_btree_cur	*cur,
	unsigned int		level,
	unsigned int		nr_this_level,
	void			*priv)
{
	/* The incore bmbt root is never a leaf block. */
	ASSERT(level > 0);

	return XFS_BMAP_BROOT_SPACE_CALC(cur->bc_mp, nr_this_level);
}
/* Update the inode counters. */
STATIC int
xrep_bmap_reset_counters(
	struct xrep_bmap	*rb)
{
	struct xfs_scrub	*sc = rb->sc;
	struct xbtree_ifakeroot	*ifake = &rb->new_bmapbt.ifake;
	int64_t			delta;

	/* Set the REFLINK iflag if the scan found any shared extents. */
	if (rb->reflink_scan == RLS_SET_IFLAG)
		sc->ip->i_diflags2 |= XFS_DIFLAG2_REFLINK;

	/*
	 * Update the inode block counts to reflect the extents we found in the
	 * rmapbt.
	 */
	delta = ifake->if_blocks - rb->old_bmbt_block_count;
	sc->ip->i_nblocks = rb->nblocks + delta;
	xfs_trans_log_inode(sc->tp, sc->ip, XFS_ILOG_CORE);

	/*
	 * Adjust the quota counts by the difference in size between the old
	 * and new bmbt.
	 */
	xfs_trans_mod_dquot_byino(sc->tp, sc->ip, XFS_TRANS_DQ_BCOUNT, delta);
	return 0;
}
/*
 * Create a new iext tree and load it with block mappings.  If the inode is
 * in extents format, that's all we need to do to commit the new mappings.
 * If it is in btree format, this takes care of preloading the incore tree.
 */
STATIC int
xrep_bmap_extents_load(
	struct xrep_bmap	*rb)
{
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	irec;
	struct xfs_ifork	*ifp = rb->new_bmapbt.ifake.if_fork;
	xfarray_idx_t		array_cur;
	int			error;

	/* The fake fork must start out empty. */
	ASSERT(ifp->if_bytes == 0);

	/* Add all the mappings (incl. delalloc) to the incore extent tree. */
	xfs_iext_first(ifp, &icur);
	foreach_xfarray_idx(rb->bmap_records, array_cur) {
		struct xfs_bmbt_rec	rec;

		error = xfarray_load(rb->bmap_records, array_cur, &rec);
		if (error)
			return error;

		xfs_bmbt_disk_get_all(&rec, &irec);

		xfs_iext_insert_raw(ifp, &icur, &irec);
		/* Delalloc reservations don't count towards if_nextents. */
		if (!isnullstartblock(irec.br_startblock))
			ifp->if_nextents++;

		xfs_iext_next(ifp, &icur);
	}

	return xrep_ino_ensure_extent_count(rb->sc, rb->whichfork,
			ifp->if_nextents);
}
/*
 * Reserve new btree blocks, bulk load the bmap records into the ondisk btree,
 * and load the incore extent tree.
 */
STATIC int
xrep_bmap_btree_load(
	struct xrep_bmap	*rb,
	struct xfs_btree_cur	*bmap_cur)
{
	struct xfs_scrub	*sc = rb->sc;
	int			error;

	/* Compute how many blocks we'll need. */
	error = xfs_btree_bload_compute_geometry(bmap_cur,
			&rb->new_bmapbt.bload, rb->real_mappings);
	if (error)
		return error;

	/* Last chance to abort before we start committing fixes. */
	if (xchk_should_terminate(sc, &error))
		return error;

	/*
	 * Guess how many blocks we're going to need to rebuild an entire bmap
	 * from the number of extents we found, and pump up our transaction to
	 * have sufficient block reservation.  We're allowed to exceed file
	 * quota to repair inconsistent metadata.
	 */
	error = xfs_trans_reserve_more_inode(sc->tp, sc->ip,
			rb->new_bmapbt.bload.nr_blocks, 0, true);
	if (error)
		return error;

	/* Reserve the space we'll need for the new btree. */
	error = xrep_newbt_alloc_blocks(&rb->new_bmapbt,
			rb->new_bmapbt.bload.nr_blocks);
	if (error)
		return error;

	/* Add all observed bmap records. */
	rb->array_cur = XFARRAY_CURSOR_INIT;
	error = xfs_btree_bload(bmap_cur, &rb->new_bmapbt.bload, rb);
	if (error)
		return error;

	/*
	 * Load the new bmap records into the new incore extent tree to
	 * preserve delalloc reservations for regular files.  The directory
	 * code loads the extent tree during xfs_dir_open and assumes
	 * thereafter that it remains loaded, so we must not violate that
	 * assumption.
	 */
	return xrep_bmap_extents_load(rb);
}
/*
 * Use the collected bmap information to stage a new bmap fork.  If this is
 * successful we'll return with the new fork information logged to the repair
 * transaction but not yet committed.  The caller must ensure that the inode
 * is joined to the transaction; the inode will be joined to a clean
 * transaction when the function returns.
 */
STATIC int
xrep_bmap_build_new_fork(
	struct xrep_bmap	*rb)
{
	struct xfs_owner_info	oinfo;
	struct xfs_scrub	*sc = rb->sc;
	struct xfs_btree_cur	*bmap_cur;
	struct xbtree_ifakeroot	*ifake = &rb->new_bmapbt.ifake;
	int			error;

	/* Records must be in offset order before they can be loaded. */
	error = xrep_bmap_sort_records(rb);
	if (error)
		return error;

	/*
	 * Prepare to construct the new fork by initializing the new btree
	 * structure and creating a fake ifork in the ifakeroot structure.
	 */
	xfs_rmap_ino_bmbt_owner(&oinfo, sc->ip->i_ino, rb->whichfork);
	error = xrep_newbt_init_inode(&rb->new_bmapbt, sc, rb->whichfork,
			&oinfo);
	if (error)
		return error;

	rb->new_bmapbt.bload.get_records = xrep_bmap_get_records;
	rb->new_bmapbt.bload.claim_block = xrep_bmap_claim_block;
	rb->new_bmapbt.bload.iroot_size = xrep_bmap_iroot_size;
	bmap_cur = xfs_bmbt_stage_cursor(sc->mp, sc->ip, ifake);

	/*
	 * Figure out the size and format of the new fork, then fill it with
	 * all the bmap records we've found.  Join the inode to the transaction
	 * so that we can roll the transaction while holding the inode locked.
	 */
	if (rb->real_mappings <= XFS_IFORK_MAXEXT(sc->ip, rb->whichfork)) {
		ifake->if_fork->if_format = XFS_DINODE_FMT_EXTENTS;
		error = xrep_bmap_extents_load(rb);
	} else {
		ifake->if_fork->if_format = XFS_DINODE_FMT_BTREE;
		error = xrep_bmap_btree_load(rb, bmap_cur);
	}
	if (error)
		goto err_cur;

	/*
	 * Install the new fork in the inode.  After this point the old mapping
	 * data are no longer accessible and the new tree is live.  We delete
	 * the cursor immediately after committing the staged root because the
	 * staged fork might be in extents format.
	 */
	xfs_bmbt_commit_staged_btree(bmap_cur, sc->tp, rb->whichfork);
	xfs_btree_del_cursor(bmap_cur, 0);

	/* Reset the inode counters now that we've changed the fork. */
	error = xrep_bmap_reset_counters(rb);
	if (error)
		goto err_newbt;

	/* Dispose of any unused blocks and the accounting information. */
	error = xrep_newbt_commit(&rb->new_bmapbt);
	if (error)
		return error;

	return xrep_roll_trans(sc);

err_cur:
	if (bmap_cur)
		xfs_btree_del_cursor(bmap_cur, error);
err_newbt:
	xrep_newbt_cancel(&rb->new_bmapbt);
	return error;
}
/*
 * Now that we've logged the new inode btree, invalidate all of the old blocks
 * and free them, if there were any.
 */
STATIC int
xrep_bmap_remove_old_tree(
	struct xrep_bmap	*rb)
{
	struct xfs_scrub	*sc = rb->sc;
	struct xfs_owner_info	oinfo;

	/* Free the old bmbt blocks if they're not in use. */
	xfs_rmap_ino_bmbt_owner(&oinfo, sc->ip->i_ino, rb->whichfork);
	return xrep_reap_fsblocks(sc, &rb->old_bmbt_blocks, &oinfo);
}
/* Check for garbage inputs.  Returns -ECANCELED if there's nothing to do. */
STATIC int
xrep_bmap_check_inputs(
	struct xfs_scrub	*sc,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(sc->ip, whichfork);

	ASSERT(whichfork == XFS_DATA_FORK || whichfork == XFS_ATTR_FORK);

	/* Rebuilding from rmap data requires the rmap btree. */
	if (!xfs_has_rmapbt(sc->mp))
		return -EOPNOTSUPP;

	/* No fork means nothing to rebuild. */
	if (!ifp)
		return -ECANCELED;

	/*
	 * We only know how to repair extent mappings, which is to say that we
	 * only support extents and btree fork format.  Repairs to a local
	 * format fork require a higher level repair function, so we do not
	 * have any work to do here.
	 */
	switch (ifp->if_format) {
	case XFS_DINODE_FMT_DEV:
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_UUID:
		return -ECANCELED;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		break;
	default:
		return -EFSCORRUPTED;
	}

	if (whichfork == XFS_ATTR_FORK)
		return 0;

	/* Only files, symlinks, and directories get to have data forks. */
	switch (VFS_I(sc->ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		/* ok */
		break;
	default:
		return -EINVAL;
	}

	/* Don't know how to rebuild realtime data forks. */
	if (XFS_IS_REALTIME_INODE(sc->ip))
		return -EOPNOTSUPP;

	return 0;
}
/* Set up the initial state of the reflink scan. */
static inline enum reflink_scan_state
xrep_bmap_init_reflink_scan(
	struct xfs_scrub	*sc,
	int			whichfork)
{
	/* cannot share on non-reflink filesystem */
	if (!xfs_has_reflink(sc->mp))
		return RLS_IRRELEVANT;

	/* preserve flag if it's already set */
	if (xfs_is_reflink_inode(sc->ip))
		return RLS_SET_IFLAG;

	/* can only share regular files */
	if (!S_ISREG(VFS_I(sc->ip)->i_mode))
		return RLS_IRRELEVANT;

	/* cannot share attr fork extents */
	if (whichfork != XFS_DATA_FORK)
		return RLS_IRRELEVANT;

	/* cannot share realtime extents */
	if (XFS_IS_REALTIME_INODE(sc->ip))
		return RLS_IRRELEVANT;

	/* Otherwise we must scan each extent for sharedness. */
	return RLS_UNKNOWN;
}
/*
 * Repair an inode fork.  Top-level driver: validate the inputs, collect all
 * mappings for this fork from the rmap btrees and the old incore tree, build
 * and commit a new fork, then reap the old bmbt blocks.
 */
STATIC int
xrep_bmap(
	struct xfs_scrub	*sc,
	int			whichfork)
{
	struct xrep_bmap	*rb;
	char			*descr;
	unsigned int		max_bmbt_recs;
	bool			large_extcount;
	int			error = 0;

	error = xrep_bmap_check_inputs(sc, whichfork);
	if (error == -ECANCELED)
		return 0;	/* nothing to repair is not a failure */
	if (error)
		return error;

	rb = kzalloc(sizeof(struct xrep_bmap), XCHK_GFP_FLAGS);
	if (!rb)
		return -ENOMEM;
	rb->sc = sc;
	rb->whichfork = whichfork;
	rb->reflink_scan = xrep_bmap_init_reflink_scan(sc, whichfork);

	/* Set up enough storage to handle the max records for this fork. */
	large_extcount = xfs_has_large_extent_counts(sc->mp);
	max_bmbt_recs = xfs_iext_max_nextents(large_extcount, whichfork);
	descr = xchk_xfile_ino_descr(sc, "%s fork mapping records",
			whichfork == XFS_DATA_FORK ? "data" : "attr");
	error = xfarray_create(descr, max_bmbt_recs,
			sizeof(struct xfs_bmbt_rec), &rb->bmap_records);
	kfree(descr);
	if (error)
		goto out_rb;

	/* Collect all reverse mappings for this fork's extents. */
	xfsb_bitmap_init(&rb->old_bmbt_blocks);
	error = xrep_bmap_find_mappings(rb);
	if (error)
		goto out_bitmap;

	xfs_trans_ijoin(sc->tp, sc->ip, 0);

	/* Rebuild the bmap information. */
	error = xrep_bmap_build_new_fork(rb);
	if (error)
		goto out_bitmap;

	/* Kill the old tree. */
	error = xrep_bmap_remove_old_tree(rb);
	if (error)
		goto out_bitmap;

out_bitmap:
	xfsb_bitmap_destroy(&rb->old_bmbt_blocks);
	xfarray_destroy(rb->bmap_records);
out_rb:
	kfree(rb);
	return error;
}
/* Repair an inode's data fork.  Thin wrapper around xrep_bmap(). */
int
xrep_bmap_data(
	struct xfs_scrub	*sc)
{
	return xrep_bmap(sc, XFS_DATA_FORK);
}
/* Repair an inode's attr fork.  Thin wrapper around xrep_bmap(). */
int
xrep_bmap_attr(
	struct xfs_scrub	*sc)
{
	return xrep_bmap(sc, XFS_ATTR_FORK);
}

View File

@ -239,7 +239,11 @@ int xchk_metadata_inode_forks(struct xfs_scrub *sc);
(sc)->mp->m_super->s_id, \
(sc)->sa.pag ? (sc)->sa.pag->pag_agno : (sc)->sm->sm_agno, \
##__VA_ARGS__)
/*
 * Format a short description string for an in-memory xfile associated with an
 * inode-based scrub.  Falls back to the userspace-supplied inode number in
 * sc->sm when no inode is attached.  Caller frees the result with kfree().
 */
#define xchk_xfile_ino_descr(sc, fmt, ...) \
	kasprintf(XCHK_GFP_FLAGS, "XFS (%s): inode 0x%llx " fmt, \
			(sc)->mp->m_super->s_id, \
			(sc)->ip ? (sc)->ip->i_ino : (sc)->sm->sm_ino, \
			##__VA_ARGS__)
/*
* Setting up a hook to wait for intents to drain is costly -- we have to take

614
fs/xfs/scrub/cow_repair.c Normal file
View File

@ -0,0 +1,614 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2022-2023 Oracle. All Rights Reserved.
* Author: Darrick J. Wong <djwong@kernel.org>
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_inode_fork.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "xfs_quota.h"
#include "xfs_ialloc.h"
#include "xfs_ag.h"
#include "xfs_error.h"
#include "xfs_errortag.h"
#include "xfs_icache.h"
#include "xfs_refcount_btree.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/bitmap.h"
#include "scrub/off_bitmap.h"
#include "scrub/fsb_bitmap.h"
#include "scrub/reap.h"
/*
* CoW Fork Mapping Repair
* =======================
*
* Although CoW staging extents are owned by incore CoW inode forks, on disk
* they are owned by the refcount btree. The ondisk metadata does not record
* any ownership information, which limits what we can do to repair the
* mappings in the CoW fork. At most, we can replace ifork mappings that lack
* an entry in the refcount btree or are described by a reverse mapping record
* whose owner is not OWN_COW.
*
* Replacing extents is also tricky -- we can't touch written CoW fork extents
* since they are undergoing writeback, and delalloc extents do not require
* repair since they only exist incore. Hence the most we can do is find the
* bad parts of unwritten mappings, allocate a replacement set of blocks, and
* replace the incore mapping. We use the regular reaping process to unmap
* or free the discarded blocks, as appropriate.
*/
/* In-memory state for a CoW fork repair pass. */
struct xrep_cow {
	struct xfs_scrub	*sc;

	/* Bitmap of file offset ranges that need replacing. */
	struct xoff_bitmap	bad_fileoffs;

	/* Bitmap of fsblocks that were removed from the CoW fork. */
	struct xfsb_bitmap	old_cowfork_fsblocks;

	/* CoW fork mappings used to scan for bad CoW staging extents. */
	struct xfs_bmbt_irec	irec;

	/* refcount btree block number of irec.br_startblock */
	unsigned int		irec_startbno;

	/* refcount btree block number of the next refcount record we expect */
	unsigned int		next_bno;
};
/* CoW staging extent: a freshly allocated replacement range. */
struct xrep_cow_extent {
	xfs_fsblock_t		fsbno;	/* first block of the replacement */
	xfs_extlen_t		len;	/* length in fsblocks */
};
/*
 * Mark the part of the file range that corresponds to the given physical
 * space.  Caller must ensure that the physical range is within xc->irec.
 */
STATIC int
xrep_cow_mark_file_range(
	struct xrep_cow		*xc,
	xfs_fsblock_t		startblock,
	xfs_filblks_t		blockcount)
{
	xfs_fileoff_t		startoff;

	/* Translate physical offset within irec into a file offset. */
	startoff = xc->irec.br_startoff +
				(startblock - xc->irec.br_startblock);

	trace_xrep_cow_mark_file_range(xc->sc->ip, startblock, startoff,
			blockcount);

	return xoff_bitmap_set(&xc->bad_fileoffs, startoff, blockcount);
}
/*
* Trim @src to fit within the CoW fork mapping being examined, and put the
* result in @dst.
*/
static inline void
xrep_cow_trim_refcount(
struct xrep_cow *xc,
struct xfs_refcount_irec *dst,
const struct xfs_refcount_irec *src)
{
unsigned int adj;
memcpy(dst, src, sizeof(*dst));
if (dst->rc_startblock < xc->irec_startbno) {
adj = xc->irec_startbno - dst->rc_startblock;
dst->rc_blockcount -= adj;
dst->rc_startblock += adj;
}
if (dst->rc_startblock + dst->rc_blockcount >
xc->irec_startbno + xc->irec.br_blockcount) {
adj = (dst->rc_startblock + dst->rc_blockcount) -
(xc->irec_startbno + xc->irec.br_blockcount);
dst->rc_blockcount -= adj;
}
}
/*
 * Mark any shared CoW staging extents.  Called back for each record of a
 * SHARED-domain refcountbt range query; any record returned here overlaps
 * xc->irec and therefore is cross-linked with regular file data.
 */
STATIC int
xrep_cow_mark_shared_staging(
	struct xfs_btree_cur		*cur,
	const struct xfs_refcount_irec	*rec,
	void				*priv)
{
	struct xrep_cow			*xc = priv;
	struct xfs_refcount_irec	rrec;
	xfs_fsblock_t			fsbno;

	/* A record outside the shared domain here means a corrupt btree. */
	if (!xfs_refcount_check_domain(rec) ||
	    rec->rc_domain != XFS_REFC_DOMAIN_SHARED)
		return -EFSCORRUPTED;

	/* Clamp the record to the mapping under scan before marking it. */
	xrep_cow_trim_refcount(xc, &rrec, rec);

	fsbno = XFS_AGB_TO_FSB(xc->sc->mp, cur->bc_ag.pag->pag_agno,
			rrec.rc_startblock);
	return xrep_cow_mark_file_range(xc, fsbno, rrec.rc_blockcount);
}
/*
 * Mark any portion of the CoW fork file offset range where there is not a CoW
 * staging extent record in the refcountbt, and keep a record of where we did
 * find correct refcountbt records.  Staging records are always cleaned out at
 * mount time, so any two inodes trying to map the same staging area would have
 * already taken the fs down due to refcount btree verifier errors.  Hence this
 * inode should be the sole creator of the staging extent records ondisk.
 */
STATIC int
xrep_cow_mark_missing_staging(
	struct xfs_btree_cur		*cur,
	const struct xfs_refcount_irec	*rec,
	void				*priv)
{
	struct xrep_cow			*xc = priv;
	struct xfs_refcount_irec	rrec;
	int				error;

	/* Only COW-domain records are expected from this query. */
	if (!xfs_refcount_check_domain(rec) ||
	    rec->rc_domain != XFS_REFC_DOMAIN_COW)
		return -EFSCORRUPTED;

	xrep_cow_trim_refcount(xc, &rrec, rec);

	/* No gap between the previous record and this one?  Nothing to mark. */
	if (xc->next_bno >= rrec.rc_startblock)
		goto next;

	/* Mark the hole between the last staging record and this one. */
	error = xrep_cow_mark_file_range(xc,
			XFS_AGB_TO_FSB(xc->sc->mp, cur->bc_ag.pag->pag_agno,
				       xc->next_bno),
			rrec.rc_startblock - xc->next_bno);
	if (error)
		return error;

next:
	/* Remember where the next staging record ought to begin. */
	xc->next_bno = rrec.rc_startblock + rrec.rc_blockcount;
	return 0;
}
/*
 * Mark any area that does not correspond to a CoW staging rmap.  These are
 * cross-linked areas that must be avoided.
 */
STATIC int
xrep_cow_mark_missing_staging_rmap(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	struct xrep_cow			*xc = priv;
	xfs_fsblock_t			fsbno;
	xfs_agblock_t			rec_bno;
	xfs_extlen_t			rec_len;
	unsigned int			adj;

	/* Rmaps owned by OWN_COW are exactly what we want; skip them. */
	if (rec->rm_owner == XFS_RMAP_OWN_COW)
		return 0;

	rec_bno = rec->rm_startblock;
	rec_len = rec->rm_blockcount;

	/* Clamp the rmap record to the mapping under scan, like
	 * xrep_cow_trim_refcount does for refcount records. */
	if (rec_bno < xc->irec_startbno) {
		adj = xc->irec_startbno - rec_bno;
		rec_len -= adj;
		rec_bno += adj;
	}

	if (rec_bno + rec_len > xc->irec_startbno + xc->irec.br_blockcount) {
		adj = (rec_bno + rec_len) -
		      (xc->irec_startbno + xc->irec.br_blockcount);
		rec_len -= adj;
	}

	fsbno = XFS_AGB_TO_FSB(xc->sc->mp, cur->bc_ag.pag->pag_agno, rec_bno);
	return xrep_cow_mark_file_range(xc, fsbno, rec_len);
}
/*
* Find any part of the CoW fork mapping that isn't a single-owner CoW staging
* extent and mark the corresponding part of the file range in the bitmap.
*/
STATIC int
xrep_cow_find_bad(
struct xrep_cow *xc)
{
struct xfs_refcount_irec rc_low = { 0 };
struct xfs_refcount_irec rc_high = { 0 };
struct xfs_rmap_irec rm_low = { 0 };
struct xfs_rmap_irec rm_high = { 0 };
struct xfs_perag *pag;
struct xfs_scrub *sc = xc->sc;
xfs_agnumber_t agno;
int error;
agno = XFS_FSB_TO_AGNO(sc->mp, xc->irec.br_startblock);
xc->irec_startbno = XFS_FSB_TO_AGBNO(sc->mp, xc->irec.br_startblock);
pag = xfs_perag_get(sc->mp, agno);
if (!pag)
return -EFSCORRUPTED;
error = xrep_ag_init(sc, pag, &sc->sa);
if (error)
goto out_pag;
/* Mark any CoW fork extents that are shared. */
rc_low.rc_startblock = xc->irec_startbno;
rc_high.rc_startblock = xc->irec_startbno + xc->irec.br_blockcount - 1;
rc_low.rc_domain = rc_high.rc_domain = XFS_REFC_DOMAIN_SHARED;
error = xfs_refcount_query_range(sc->sa.refc_cur, &rc_low, &rc_high,
xrep_cow_mark_shared_staging, xc);
if (error)
goto out_sa;
/* Make sure there are CoW staging extents for the whole mapping. */
rc_low.rc_startblock = xc->irec_startbno;
rc_high.rc_startblock = xc->irec_startbno + xc->irec.br_blockcount - 1;
rc_low.rc_domain = rc_high.rc_domain = XFS_REFC_DOMAIN_COW;
xc->next_bno = xc->irec_startbno;
error = xfs_refcount_query_range(sc->sa.refc_cur, &rc_low, &rc_high,
xrep_cow_mark_missing_staging, xc);
if (error)
goto out_sa;
if (xc->next_bno < xc->irec_startbno + xc->irec.br_blockcount) {
error = xrep_cow_mark_file_range(xc,
XFS_AGB_TO_FSB(sc->mp, pag->pag_agno,
xc->next_bno),
xc->irec_startbno + xc->irec.br_blockcount -
xc->next_bno);
if (error)
goto out_sa;
}
/* Mark any area has an rmap that isn't a COW staging extent. */
rm_low.rm_startblock = xc->irec_startbno;
memset(&rm_high, 0xFF, sizeof(rm_high));
rm_high.rm_startblock = xc->irec_startbno + xc->irec.br_blockcount - 1;
error = xfs_rmap_query_range(sc->sa.rmap_cur, &rm_low, &rm_high,
xrep_cow_mark_missing_staging_rmap, xc);
if (error)
goto out_sa;
/*
* If userspace is forcing us to rebuild the CoW fork or someone turned
* on the debugging knob, replace everything in the CoW fork.
*/
if ((sc->sm->sm_flags & XFS_SCRUB_IFLAG_FORCE_REBUILD) ||
XFS_TEST_ERROR(false, sc->mp, XFS_ERRTAG_FORCE_SCRUB_REPAIR)) {
error = xrep_cow_mark_file_range(xc, xc->irec.br_startblock,
xc->irec.br_blockcount);
if (error)
return error;
}
out_sa:
xchk_ag_free(sc, &sc->sa);
out_pag:
xfs_perag_put(pag);
return 0;
}
/*
 * Allocate a replacement CoW staging extent of up to the given number of
 * blocks, and fill out the mapping.
 */
STATIC int
xrep_cow_alloc(
	struct xfs_scrub	*sc,
	xfs_extlen_t		maxlen,
	struct xrep_cow_extent	*repl)
{
	struct xfs_alloc_arg	args = {
		.tp		= sc->tp,
		.mp		= sc->mp,
		.oinfo		= XFS_RMAP_OINFO_SKIP_UPDATE,
		.minlen		= 1,
		.maxlen		= maxlen,
		.prod		= 1,
		.resv		= XFS_AG_RESV_NONE,
		.datatype	= XFS_ALLOC_USERDATA,
	};
	int			error;

	/* Widen the transaction's block reservation to cover the allocation. */
	error = xfs_trans_reserve_more(sc->tp, maxlen, 0);
	if (error)
		return error;

	/* Try to allocate near the inode, like regular CoW fork allocation. */
	error = xfs_alloc_vextent_start_ag(&args,
			XFS_INO_TO_FSB(sc->mp, sc->ip->i_ino));
	if (error)
		return error;
	if (args.fsbno == NULLFSBLOCK)
		return -ENOSPC;

	/* Record the new range as a CoW staging extent in the refcountbt. */
	xfs_refcount_alloc_cow_extent(sc->tp, args.fsbno, args.len);

	repl->fsbno = args.fsbno;
	repl->len = args.len;
	return 0;
}
/*
 * Look up the current CoW fork mapping so that we only allocate enough to
 * replace a single mapping.  If we don't find a mapping that covers the start
 * of the file range, or we find a delalloc or written extent, something is
 * seriously wrong, since we didn't drop the ILOCK.
 */
static inline int
xrep_cow_find_mapping(
	struct xrep_cow		*xc,
	struct xfs_iext_cursor	*icur,
	xfs_fileoff_t		startoff,
	struct xfs_bmbt_irec	*got)
{
	struct xfs_inode	*ip = xc->sc->ip;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);

	/* Any of these conditions indicates an incore-state inconsistency
	 * that should be impossible while holding the ILOCK. */
	if (!xfs_iext_lookup_extent(ip, ifp, startoff, icur, got))
		goto bad;

	if (got->br_startoff > startoff)
		goto bad;

	if (got->br_blockcount == 0)
		goto bad;

	if (isnullstartblock(got->br_startblock))
		goto bad;

	if (xfs_bmap_is_written_extent(got))
		goto bad;

	return 0;

bad:
	ASSERT(0);
	return -EFSCORRUPTED;
}
#define REPLACE_LEFT_SIDE (1U << 0)
#define REPLACE_RIGHT_SIDE (1U << 1)
/*
 * Given a CoW fork mapping @got and a replacement mapping @repl, remap the
 * beginning of @got with the space described by @repl.
 */
static inline void
xrep_cow_replace_mapping(
	struct xfs_inode		*ip,
	struct xfs_iext_cursor		*icur,
	const struct xfs_bmbt_irec	*got,
	const struct xrep_cow_extent	*repl)
{
	struct xfs_bmbt_irec		new = *got; /* struct copy */

	ASSERT(repl->len > 0);
	ASSERT(!isnullstartblock(got->br_startblock));

	trace_xrep_cow_replace_mapping(ip, got, repl->fsbno, repl->len);

	if (got->br_blockcount == repl->len) {
		/*
		 * The new extent is a complete replacement for the existing
		 * extent.  Update the COW fork record.
		 */
		new.br_startblock = repl->fsbno;
		xfs_iext_update_extent(ip, BMAP_COWFORK, icur, &new);
		return;
	}

	/*
	 * The new extent can replace the beginning of the COW fork record.
	 * Move the left side of @got upwards, then insert the new record.
	 */
	new.br_startoff += repl->len;
	new.br_startblock += repl->len;
	new.br_blockcount -= repl->len;
	xfs_iext_update_extent(ip, BMAP_COWFORK, icur, &new);

	new.br_startoff = got->br_startoff;
	new.br_startblock = repl->fsbno;
	new.br_blockcount = repl->len;
	xfs_iext_insert(ip, icur, &new, BMAP_COWFORK);
}
/*
 * Replace the unwritten CoW staging extent backing the given file range with a
 * new space extent that isn't as problematic.  On return, *blockcount is the
 * number of blocks actually replaced in this step.
 */
STATIC int
xrep_cow_replace_range(
	struct xrep_cow		*xc,
	xfs_fileoff_t		startoff,
	xfs_extlen_t		*blockcount)
{
	struct xfs_iext_cursor	icur;
	struct xrep_cow_extent	repl;
	struct xfs_bmbt_irec	got;
	struct xfs_scrub	*sc = xc->sc;
	xfs_fileoff_t		nextoff;
	xfs_extlen_t		alloc_len;
	int			error;

	/*
	 * Put the existing CoW fork mapping in @got.  If @got ends before
	 * the requested range, shorten this step so that we only replace one
	 * extent mapping at a time.
	 */
	error = xrep_cow_find_mapping(xc, &icur, startoff, &got);
	if (error)
		return error;
	nextoff = min(startoff + *blockcount,
		      got.br_startoff + got.br_blockcount);

	/*
	 * Allocate a replacement extent.  If we don't fill all the blocks,
	 * shorten the quantity that will be deleted in this step.
	 */
	alloc_len = min_t(xfs_fileoff_t, XFS_MAX_BMBT_EXTLEN,
			  nextoff - startoff);
	error = xrep_cow_alloc(sc, alloc_len, &repl);
	if (error)
		return error;

	/*
	 * Replace the old mapping with the new one, and commit the metadata
	 * changes made so far.
	 */
	xrep_cow_replace_mapping(sc->ip, &icur, &got, &repl);

	xfs_inode_set_cowblocks_tag(sc->ip);
	error = xfs_defer_finish(&sc->tp);
	if (error)
		return error;

	/* Note the old CoW staging extents; we'll reap them all later. */
	error = xfsb_bitmap_set(&xc->old_cowfork_fsblocks, got.br_startblock,
			repl.len);
	if (error)
		return error;

	*blockcount = repl.len;
	return 0;
}
/*
 * Replace a bad part of an unwritten CoW staging extent with a fresh delalloc
 * reservation.  Bitmap walk callback: @startoff/@blockcount describe one bad
 * file range; replace it in chunks of at most XFS_MAX_BMBT_EXTLEN blocks.
 */
STATIC int
xrep_cow_replace(
	uint64_t		startoff,
	uint64_t		blockcount,
	void			*priv)
{
	struct xrep_cow		*xc = priv;
	int			error = 0;

	while (blockcount > 0) {
		/* xrep_cow_replace_range shrinks len to what it replaced. */
		xfs_extlen_t	len = min_t(xfs_filblks_t, blockcount,
					    XFS_MAX_BMBT_EXTLEN);

		error = xrep_cow_replace_range(xc, startoff, &len);
		if (error)
			break;

		blockcount -= len;
		startoff += len;
	}

	return error;
}
/*
 * Repair an inode's CoW fork.  The CoW fork is an in-core structure, so
 * there's no btree to rebuild.  Instead, we replace any mappings that are
 * cross-linked or lack ondisk CoW fork records in the refcount btree.
 */
int
xrep_bmap_cow(
	struct xfs_scrub	*sc)
{
	struct xrep_cow		*xc;
	struct xfs_iext_cursor	icur;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(sc->ip, XFS_COW_FORK);
	int			error;

	/* Scanning for bad extents requires both rmapbt and reflink. */
	if (!xfs_has_rmapbt(sc->mp) || !xfs_has_reflink(sc->mp))
		return -EOPNOTSUPP;

	/* No CoW fork means nothing to repair. */
	if (!ifp)
		return 0;

	/* realtime files aren't supported yet */
	if (XFS_IS_REALTIME_INODE(sc->ip))
		return -EOPNOTSUPP;

	/*
	 * If we're somehow not in extents format, then reinitialize it to
	 * an empty extent mapping fork and exit.
	 */
	if (ifp->if_format != XFS_DINODE_FMT_EXTENTS) {
		ifp->if_format = XFS_DINODE_FMT_EXTENTS;
		ifp->if_nextents = 0;
		return 0;
	}

	xc = kzalloc(sizeof(struct xrep_cow), XCHK_GFP_FLAGS);
	if (!xc)
		return -ENOMEM;

	xfs_trans_ijoin(sc->tp, sc->ip, 0);

	xc->sc = sc;
	xoff_bitmap_init(&xc->bad_fileoffs);
	xfsb_bitmap_init(&xc->old_cowfork_fsblocks);

	for_each_xfs_iext(ifp, &icur, &xc->irec) {
		if (xchk_should_terminate(sc, &error))
			goto out_bitmap;

		/*
		 * delalloc reservations only exist incore, so there is no
		 * ondisk metadata that we can examine.  Hence we leave them
		 * alone.
		 */
		if (isnullstartblock(xc->irec.br_startblock))
			continue;

		/*
		 * COW fork extents are only in the written state if writeback
		 * is actively writing to disk.  We cannot restart the write
		 * at a different disk address since we've already issued the
		 * IO, so we leave these alone and hope for the best.
		 */
		if (xfs_bmap_is_written_extent(&xc->irec))
			continue;

		error = xrep_cow_find_bad(xc);
		if (error)
			goto out_bitmap;
	}

	/* Replace any bad unwritten mappings with fresh reservations. */
	error = xoff_bitmap_walk(&xc->bad_fileoffs, xrep_cow_replace, xc);
	if (error)
		goto out_bitmap;

	/*
	 * Reap as many of the old CoW blocks as we can.  They are owned ondisk
	 * by the refcount btree, not the inode, so it is correct to treat them
	 * like inode metadata.
	 */
	error = xrep_reap_fsblocks(sc, &xc->old_cowfork_fsblocks,
			&XFS_RMAP_OINFO_COW);
	if (error)
		goto out_bitmap;

out_bitmap:
	xfsb_bitmap_destroy(&xc->old_cowfork_fsblocks);
	xoff_bitmap_destroy(&xc->bad_fileoffs);
	/* Fix: xc was allocated with kzalloc(), so free it with kfree() for
	 * consistency with the rest of this file (kmem_free is the legacy
	 * XFS wrapper). */
	kfree(xc);
	return error;
}

37
fs/xfs/scrub/fsb_bitmap.h Normal file
View File

@ -0,0 +1,37 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2018-2023 Oracle. All Rights Reserved.
* Author: Darrick J. Wong <djwong@kernel.org>
*/
#ifndef __XFS_SCRUB_FSB_BITMAP_H__
#define __XFS_SCRUB_FSB_BITMAP_H__

/* Bitmap of xfs_fsblock_t ranges; a type-safe wrapper around xbitmap64. */
struct xfsb_bitmap {
	struct xbitmap64	fsbitmap;
};

/* Initialize an empty fsblock bitmap. */
static inline void xfsb_bitmap_init(struct xfsb_bitmap *bitmap)
{
	xbitmap64_init(&bitmap->fsbitmap);
}

/* Free all resources held by an fsblock bitmap. */
static inline void xfsb_bitmap_destroy(struct xfsb_bitmap *bitmap)
{
	xbitmap64_destroy(&bitmap->fsbitmap);
}

/* Set @len fsblocks starting at @start.  Returns 0 or a negative errno. */
static inline int xfsb_bitmap_set(struct xfsb_bitmap *bitmap,
		xfs_fsblock_t start, xfs_filblks_t len)
{
	return xbitmap64_set(&bitmap->fsbitmap, start, len);
}

/* Call @fn on every contiguous set range; stops on nonzero return. */
static inline int xfsb_bitmap_walk(struct xfsb_bitmap *bitmap,
		xbitmap64_walk_fn fn, void *priv)
{
	return xbitmap64_walk(&bitmap->fsbitmap, fn, priv);
}

#endif /* __XFS_SCRUB_FSB_BITMAP_H__ */

37
fs/xfs/scrub/off_bitmap.h Normal file
View File

@ -0,0 +1,37 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2022-2023 Oracle. All Rights Reserved.
* Author: Darrick J. Wong <djwong@kernel.org>
*/
#ifndef __XFS_SCRUB_OFF_BITMAP_H__
#define __XFS_SCRUB_OFF_BITMAP_H__

/* Bitmap of xfs_fileoff_t ranges; a type-safe wrapper around xbitmap64. */
struct xoff_bitmap {
	struct xbitmap64	offbitmap;
};

/* Initialize an empty file-offset bitmap. */
static inline void xoff_bitmap_init(struct xoff_bitmap *bitmap)
{
	xbitmap64_init(&bitmap->offbitmap);
}

/* Free all resources held by a file-offset bitmap. */
static inline void xoff_bitmap_destroy(struct xoff_bitmap *bitmap)
{
	xbitmap64_destroy(&bitmap->offbitmap);
}

/* Set @len file offsets starting at @off.  Returns 0 or a negative errno. */
static inline int xoff_bitmap_set(struct xoff_bitmap *bitmap,
		xfs_fileoff_t off, xfs_filblks_t len)
{
	return xbitmap64_set(&bitmap->offbitmap, off, len);
}

/* Call @fn on every contiguous set range; stops on nonzero return. */
static inline int xoff_bitmap_walk(struct xoff_bitmap *bitmap,
		xbitmap64_walk_fn fn, void *priv)
{
	return xbitmap64_walk(&bitmap->offbitmap, fn, priv);
}

#endif /* __XFS_SCRUB_OFF_BITMAP_H__ */

View File

@ -20,6 +20,7 @@
#include "xfs_ialloc_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount.h"
#include "xfs_refcount_btree.h"
#include "xfs_extent_busy.h"
#include "xfs_ag.h"
@ -38,6 +39,7 @@
#include "scrub/repair.h"
#include "scrub/bitmap.h"
#include "scrub/agb_bitmap.h"
#include "scrub/fsb_bitmap.h"
#include "scrub/reap.h"
/*
@ -75,10 +77,10 @@
* with only the same rmap owner but the block is not owned by something with
* the same rmap owner, the block will be freed.
*
* The caller is responsible for locking the AG headers for the entire rebuild
* operation so that nothing else can sneak in and change the AG state while
* we're not looking. We must also invalidate any buffers associated with
* @bitmap.
* The caller is responsible for locking the AG headers/inode for the entire
* rebuild operation so that nothing else can sneak in and change the incore
* state while we're not looking. We must also invalidate any buffers
* associated with @bitmap.
*/
/* Information about reaping extents after a repair. */
@ -379,6 +381,17 @@ xreap_agextent_iter(
trace_xreap_dispose_unmap_extent(sc->sa.pag, agbno, *aglenp);
rs->force_roll = true;
if (rs->oinfo == &XFS_RMAP_OINFO_COW) {
/*
* If we're unmapping CoW staging extents, remove the
* records from the refcountbt, which will remove the
* rmap record as well.
*/
xfs_refcount_free_cow_extent(sc->tp, fsbno, *aglenp);
return 0;
}
return xfs_rmap_free(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno,
*aglenp, rs->oinfo);
}
@ -397,6 +410,26 @@ xreap_agextent_iter(
return 0;
}
/*
* If we're getting rid of CoW staging extents, use deferred work items
* to remove the refcountbt records (which removes the rmap records)
* and free the extent. We're not worried about the system going down
* here because log recovery walks the refcount btree to clean out the
* CoW staging extents.
*/
if (rs->oinfo == &XFS_RMAP_OINFO_COW) {
ASSERT(rs->resv == XFS_AG_RESV_NONE);
xfs_refcount_free_cow_extent(sc->tp, fsbno, *aglenp);
error = xfs_free_extent_later(sc->tp, fsbno, *aglenp, NULL,
rs->resv, true);
if (error)
return error;
rs->force_roll = true;
return 0;
}
/* Put blocks back on the AGFL one at a time. */
if (rs->resv == XFS_AG_RESV_AGFL) {
ASSERT(*aglenp == 1);
@ -501,3 +534,115 @@ xrep_reap_agblocks(
return 0;
}
/*
 * Break a file metadata extent into sub-extents by fate (crosslinked, not
 * crosslinked), and dispose of each sub-extent separately.  The extent must
 * not cross an AG boundary.
 */
STATIC int
xreap_fsmeta_extent(
	uint64_t		fsbno,
	uint64_t		len,
	void			*priv)
{
	struct xreap_state	*rs = priv;
	struct xfs_scrub	*sc = rs->sc;
	xfs_agnumber_t		agno = XFS_FSB_TO_AGNO(sc->mp, fsbno);
	xfs_agblock_t		agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno);
	xfs_agblock_t		agbno_next = agbno + len;
	int			error = 0;

	ASSERT(len <= XFS_MAX_BMBT_EXTLEN);
	ASSERT(sc->ip != NULL);
	ASSERT(!sc->sa.pag);

	/*
	 * We're reaping blocks after repairing file metadata, which means that
	 * we have to init the xchk_ag structure ourselves.
	 */
	sc->sa.pag = xfs_perag_get(sc->mp, agno);
	if (!sc->sa.pag)
		return -EFSCORRUPTED;

	error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &sc->sa.agf_bp);
	if (error)
		goto out_pag;

	/* Walk the extent in crosslinked/not-crosslinked sub-extents. */
	while (agbno < agbno_next) {
		xfs_extlen_t	aglen;
		bool		crosslinked;

		error = xreap_agextent_select(rs, agbno, agbno_next,
				&crosslinked, &aglen);
		if (error)
			goto out_agf;

		error = xreap_agextent_iter(rs, agbno, &aglen, crosslinked);
		if (error)
			goto out_agf;

		if (xreap_want_defer_finish(rs)) {
			/*
			 * Holds the AGF buffer across the deferred chain
			 * processing.
			 */
			error = xrep_defer_finish(sc);
			if (error)
				goto out_agf;
			xreap_defer_finish_reset(rs);
		} else if (xreap_want_roll(rs)) {
			/*
			 * Hold the AGF buffer across the transaction roll so
			 * that we don't have to reattach it to the scrub
			 * context.
			 */
			xfs_trans_bhold(sc->tp, sc->sa.agf_bp);
			error = xfs_trans_roll_inode(&sc->tp, sc->ip);
			xfs_trans_bjoin(sc->tp, sc->sa.agf_bp);
			if (error)
				goto out_agf;
			xreap_reset(rs);
		}

		agbno += aglen;
	}

out_agf:
	xfs_trans_brelse(sc->tp, sc->sa.agf_bp);
	sc->sa.agf_bp = NULL;
out_pag:
	xfs_perag_put(sc->sa.pag);
	sc->sa.pag = NULL;
	return error;
}
/*
 * Dispose of every block of every fs metadata extent in the bitmap.
 * Do not use this to dispose of the mappings in an ondisk inode fork.
 */
int
xrep_reap_fsblocks(
	struct xfs_scrub		*sc,
	struct xfsb_bitmap		*bitmap,
	const struct xfs_owner_info	*oinfo)
{
	struct xreap_state		rs = {
		.sc			= sc,
		.oinfo			= oinfo,
		.resv			= XFS_AG_RESV_NONE,
	};
	int				error;

	/* Reaping relies on rmap data and an inode-owning transaction. */
	ASSERT(xfs_has_rmapbt(sc->mp));
	ASSERT(sc->ip != NULL);

	error = xfsb_bitmap_walk(bitmap, xreap_fsmeta_extent, &rs);
	if (error)
		return error;

	/* Finish any deferred work queued by the last extent. */
	if (xreap_dirty(&rs))
		return xrep_defer_finish(sc);

	return 0;
}

View File

@ -6,7 +6,12 @@
#ifndef __XFS_SCRUB_REAP_H__
#define __XFS_SCRUB_REAP_H__
struct xagb_bitmap;
struct xfsb_bitmap;
int xrep_reap_agblocks(struct xfs_scrub *sc, struct xagb_bitmap *bitmap,
const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type);
int xrep_reap_fsblocks(struct xfs_scrub *sc, struct xfsb_bitmap *bitmap,
const struct xfs_owner_info *oinfo);
#endif /* __XFS_SCRUB_REAP_H__ */

View File

@ -27,6 +27,8 @@
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_defer.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
@ -883,6 +885,34 @@ xrep_reinit_pagi(
return 0;
}
/*
 * Given an active reference to a perag structure, load AG headers and cursors.
 * This should only be called to scan an AG while repairing file-based metadata.
 */
int
xrep_ag_init(
	struct xfs_scrub	*sc,
	struct xfs_perag	*pag,
	struct xchk_ag		*sa)
{
	int			error;

	/* The caller must not already hold AG state in @sa. */
	ASSERT(!sa->pag);

	error = xfs_ialloc_read_agi(pag, sc->tp, &sa->agi_bp);
	if (error)
		return error;

	error = xfs_alloc_read_agf(pag, sc->tp, 0, &sa->agf_bp);
	if (error)
		return error;

	/* Grab our own passive reference from the caller's ref. */
	sa->pag = xfs_perag_hold(pag);
	xrep_ag_btcur_init(sc, sa);
	return 0;
}
/* Reinitialize the per-AG block reservation for the AG we just fixed. */
int
xrep_reset_perag_resv(
@ -912,3 +942,23 @@ xrep_reset_perag_resv(
out:
return error;
}
/* Decide if we are going to call the repair function for a scrub type. */
bool
xrep_will_attempt(
	struct xfs_scrub	*sc)
{
	/*
	 * Repair when userspace demanded a rebuild, when the debugging
	 * error tag is forcing repairs, or when the scrub results indicate
	 * corruption or failed cross-referencing.
	 */
	return (sc->sm->sm_flags & XFS_SCRUB_IFLAG_FORCE_REBUILD) ||
	       XFS_TEST_ERROR(false, sc->mp, XFS_ERRTAG_FORCE_SCRUB_REPAIR) ||
	       xchk_needs_repair(sc->sm);
}

View File

@ -28,6 +28,7 @@ static inline int xrep_notsupported(struct xfs_scrub *sc)
/* Repair helpers */
int xrep_attempt(struct xfs_scrub *sc, struct xchk_stats_run *run);
bool xrep_will_attempt(struct xfs_scrub *sc);
void xrep_failure(struct xfs_mount *mp);
int xrep_roll_ag_trans(struct xfs_scrub *sc);
int xrep_roll_trans(struct xfs_scrub *sc);
@ -48,6 +49,7 @@ xrep_trans_commit(
struct xbitmap;
struct xagb_bitmap;
struct xfsb_bitmap;
int xrep_fix_freelist(struct xfs_scrub *sc, bool can_shrink);
@ -88,6 +90,8 @@ struct xfs_imap;
int xrep_setup_inode(struct xfs_scrub *sc, const struct xfs_imap *imap);
void xrep_ag_btcur_init(struct xfs_scrub *sc, struct xchk_ag *sa);
int xrep_ag_init(struct xfs_scrub *sc, struct xfs_perag *pag,
struct xchk_ag *sa);
/* Metadata revalidators */
@ -105,6 +109,9 @@ int xrep_allocbt(struct xfs_scrub *sc);
int xrep_iallocbt(struct xfs_scrub *sc);
int xrep_refcountbt(struct xfs_scrub *sc);
int xrep_inode(struct xfs_scrub *sc);
int xrep_bmap_data(struct xfs_scrub *sc);
int xrep_bmap_attr(struct xfs_scrub *sc);
int xrep_bmap_cow(struct xfs_scrub *sc);
int xrep_reinit_pagf(struct xfs_scrub *sc);
int xrep_reinit_pagi(struct xfs_scrub *sc);
@ -112,6 +119,7 @@ int xrep_reinit_pagi(struct xfs_scrub *sc);
#else
#define xrep_ino_dqattach(sc) (0)
#define xrep_will_attempt(sc) (false)
static inline int
xrep_attempt(
@ -164,6 +172,9 @@ xrep_setup_nothing(
#define xrep_iallocbt xrep_notsupported
#define xrep_refcountbt xrep_notsupported
#define xrep_inode xrep_notsupported
#define xrep_bmap_data xrep_notsupported
#define xrep_bmap_attr xrep_notsupported
#define xrep_bmap_cow xrep_notsupported
#endif /* CONFIG_XFS_ONLINE_REPAIR */

View File

@ -14,8 +14,6 @@
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
@ -288,19 +286,19 @@ static const struct xchk_meta_ops meta_scrub_ops[] = {
.type = ST_INODE,
.setup = xchk_setup_inode_bmap,
.scrub = xchk_bmap_data,
.repair = xrep_notsupported,
.repair = xrep_bmap_data,
},
[XFS_SCRUB_TYPE_BMBTA] = { /* inode attr fork */
.type = ST_INODE,
.setup = xchk_setup_inode_bmap,
.scrub = xchk_bmap_attr,
.repair = xrep_notsupported,
.repair = xrep_bmap_attr,
},
[XFS_SCRUB_TYPE_BMBTC] = { /* inode CoW fork */
.type = ST_INODE,
.setup = xchk_setup_inode_bmap,
.scrub = xchk_bmap_cow,
.repair = xrep_notsupported,
.repair = xrep_bmap_cow,
},
[XFS_SCRUB_TYPE_DIR] = { /* directory */
.type = ST_INODE,
@ -550,21 +548,11 @@ retry_op:
xchk_update_health(sc);
if (xchk_could_repair(sc)) {
bool needs_fix = xchk_needs_repair(sc->sm);
/* Userspace asked us to rebuild the structure regardless. */
if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_FORCE_REBUILD)
needs_fix = true;
/* Let debug users force us into the repair routines. */
if (XFS_TEST_ERROR(needs_fix, mp, XFS_ERRTAG_FORCE_SCRUB_REPAIR))
needs_fix = true;
/*
* If userspace asked for a repair but it wasn't necessary,
* report that back to userspace.
*/
if (!needs_fix) {
if (!xrep_will_attempt(sc)) {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_NO_REPAIR_NEEDED;
goto out_nofix;
}

View File

@ -1175,7 +1175,7 @@ DEFINE_EVENT(xrep_rmap_class, name, \
TP_ARGS(mp, agno, agbno, len, owner, offset, flags))
DEFINE_REPAIR_RMAP_EVENT(xrep_ibt_walk_rmap);
DEFINE_REPAIR_RMAP_EVENT(xrep_rmap_extent_fn);
DEFINE_REPAIR_RMAP_EVENT(xrep_bmap_extent_fn);
DEFINE_REPAIR_RMAP_EVENT(xrep_bmap_walk_rmap);
TRACE_EVENT(xrep_abt_found,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
@ -1260,6 +1260,38 @@ TRACE_EVENT(xrep_refc_found,
__entry->refcount)
)
/* Trace each mapping record accepted while rebuilding a data/attr fork. */
TRACE_EVENT(xrep_bmap_found,
	TP_PROTO(struct xfs_inode *ip, int whichfork,
		 struct xfs_bmbt_irec *irec),
	TP_ARGS(ip, whichfork, irec),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, whichfork)
		__field(xfs_fileoff_t, lblk)
		__field(xfs_filblks_t, len)
		__field(xfs_fsblock_t, pblk)
		__field(int, state)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->whichfork = whichfork;
		__entry->lblk = irec->br_startoff;
		__entry->len = irec->br_blockcount;
		__entry->pblk = irec->br_startblock;
		__entry->state = irec->br_state;
	),
	TP_printk("dev %d:%d ino 0x%llx whichfork %s fileoff 0x%llx fsbcount 0x%llx startblock 0x%llx state %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS),
		  __entry->lblk,
		  __entry->len,
		  __entry->pblk,
		  __entry->state)
);
TRACE_EVENT(xrep_findroot_block,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
uint32_t magic, uint16_t level),
@ -1564,6 +1596,90 @@ TRACE_EVENT(xrep_dinode_count_rmaps,
__entry->attr_extents)
);
/*
 * Trace a physically-contiguous file range (startoff/blockcount mapped at
 * startblock) processed during CoW fork repair.  NOTE(review): the name
 * implies the range is being marked for later replacement — verify against
 * the caller in the CoW repair code.
 */
TRACE_EVENT(xrep_cow_mark_file_range,
	TP_PROTO(struct xfs_inode *ip, xfs_fsblock_t startblock,
		 xfs_fileoff_t startoff, xfs_filblks_t blockcount),
	TP_ARGS(ip, startblock, startoff, blockcount),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_fsblock_t, startblock)
		__field(xfs_fileoff_t, startoff)
		__field(xfs_filblks_t, blockcount)
	),
	TP_fast_assign(
		__entry->dev = ip->i_mount->m_super->s_dev;
		__entry->ino = ip->i_ino;
		__entry->startoff = startoff;
		__entry->startblock = startblock;
		__entry->blockcount = blockcount;
	),
	TP_printk("dev %d:%d ino 0x%llx fileoff 0x%llx startblock 0x%llx fsbcount 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->startoff,
		  __entry->startblock,
		  __entry->blockcount)
);
/*
 * Trace the replacement of one CoW fork mapping: @irec is the existing
 * (bad) extent record, and @new_startblock/@new_blockcount describe the
 * freshly allocated space that takes its place.
 */
TRACE_EVENT(xrep_cow_replace_mapping,
	TP_PROTO(struct xfs_inode *ip, const struct xfs_bmbt_irec *irec,
		 xfs_fsblock_t new_startblock, xfs_extlen_t new_blockcount),
	TP_ARGS(ip, irec, new_startblock, new_blockcount),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		/* the old mapping being replaced */
		__field(xfs_fsblock_t, startblock)
		__field(xfs_fileoff_t, startoff)
		__field(xfs_filblks_t, blockcount)
		__field(xfs_exntst_t, state)
		/* the replacement allocation */
		__field(xfs_fsblock_t, new_startblock)
		__field(xfs_extlen_t, new_blockcount)
	),
	TP_fast_assign(
		__entry->dev = ip->i_mount->m_super->s_dev;
		__entry->ino = ip->i_ino;
		__entry->startoff = irec->br_startoff;
		__entry->startblock = irec->br_startblock;
		__entry->blockcount = irec->br_blockcount;
		__entry->state = irec->br_state;
		__entry->new_startblock = new_startblock;
		__entry->new_blockcount = new_blockcount;
	),
	TP_printk("dev %d:%d ino 0x%llx startoff 0x%llx startblock 0x%llx fsbcount 0x%llx state 0x%x new_startblock 0x%llx new_fsbcount 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->startoff,
		  __entry->startblock,
		  __entry->blockcount,
		  __entry->state,
		  __entry->new_startblock,
		  __entry->new_blockcount)
);
/*
 * Trace a per-AG extent (agbno/blockcount within @pag) during CoW repair.
 * NOTE(review): the name implies this is a CoW staging extent being freed
 * back to the filesystem — confirm at the call site.
 */
TRACE_EVENT(xrep_cow_free_staging,
	TP_PROTO(struct xfs_perag *pag, xfs_agblock_t agbno,
		 xfs_extlen_t blockcount),
	TP_ARGS(pag, agbno, blockcount),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(xfs_extlen_t, blockcount)
	),
	TP_fast_assign(
		__entry->dev = pag->pag_mount->m_super->s_dev;
		__entry->agno = pag->pag_agno;
		__entry->agbno = agbno;
		__entry->blockcount = blockcount;
	),
	TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agbno,
		  __entry->blockcount)
);
#endif /* IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR) */
#endif /* _TRACE_XFS_SCRUB_TRACE_H */

View File

@ -1236,6 +1236,68 @@ out_cancel:
return error;
}
/*
* Try to reserve more blocks for a transaction.
*
* This is for callers that need to attach resources to a transaction, scan
* those resources to determine the space reservation requirements, and then
* modify the attached resources. In other words, online repair. This can
* fail due to ENOSPC, so the caller must be able to cancel the transaction
* without shutting down the fs.
*/
int
xfs_trans_reserve_more(
struct xfs_trans *tp,
unsigned int blocks,
unsigned int rtextents)
{
struct xfs_trans_res resv = { };
return xfs_trans_reserve(tp, &resv, blocks, rtextents);
}
/*
 * Try to reserve more blocks and file quota for a transaction.  Same
 * conditions of usage as xfs_trans_reserve_more.
 *
 * @dblocks/@rblocks are data-device blocks and realtime blocks to add to
 * the transaction's reservation; quota is charged against @ip's dquots.
 * Returns 0 or a negative errno; on quota failure the freshly-taken block
 * reservations are released again so the transaction is unchanged.
 */
int
xfs_trans_reserve_more_inode(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	unsigned int		dblocks,
	unsigned int		rblocks,
	bool			force_quota)
{
	/* Empty res: reserve only blocks/rtextents, no extra log space. */
	struct xfs_trans_res	resv = { };
	struct xfs_mount	*mp = ip->i_mount;
	/* Convert realtime blocks to whole rt extents for the reservation. */
	unsigned int		rtx = xfs_extlen_to_rtxlen(mp, rblocks);
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	error = xfs_trans_reserve(tp, &resv, dblocks, rtx);
	if (error)
		return error;

	/* Quota accounting not needed: quota off, or this is a quota inode. */
	if (!XFS_IS_QUOTA_ON(mp) || xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return 0;

	/* Reserving transactions may always push past quota limits. */
	if (tp->t_flags & XFS_TRANS_RESERVE)
		force_quota = true;

	error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks,
			force_quota);
	if (!error)
		return 0;

	/*
	 * Quota failed, give back the new reservation.  Order mirrors the
	 * reserve path above: data blocks first, then realtime extents,
	 * undoing both the fs counters and this transaction's bookkeeping.
	 */
	xfs_mod_fdblocks(mp, dblocks, tp->t_flags & XFS_TRANS_RESERVE);
	tp->t_blk_res -= dblocks;
	xfs_mod_frextents(mp, rtx);
	tp->t_rtx_res -= rtx;
	return error;
}
/*
* Allocate an transaction in preparation for inode creation by reserving quota
* against the given dquots. Callers are not required to hold any inode locks.

View File

@ -164,6 +164,8 @@ typedef struct xfs_trans {
int xfs_trans_alloc(struct xfs_mount *mp, struct xfs_trans_res *resp,
uint blocks, uint rtextents, uint flags,
struct xfs_trans **tpp);
int xfs_trans_reserve_more(struct xfs_trans *tp,
unsigned int blocks, unsigned int rtextents);
int xfs_trans_alloc_empty(struct xfs_mount *mp,
struct xfs_trans **tpp);
void xfs_trans_mod_sb(xfs_trans_t *, uint, int64_t);
@ -248,6 +250,8 @@ struct xfs_dquot;
int xfs_trans_alloc_inode(struct xfs_inode *ip, struct xfs_trans_res *resv,
unsigned int dblocks, unsigned int rblocks, bool force,
struct xfs_trans **tpp);
int xfs_trans_reserve_more_inode(struct xfs_trans *tp, struct xfs_inode *ip,
unsigned int dblocks, unsigned int rblocks, bool force_quota);
int xfs_trans_alloc_icreate(struct xfs_mount *mp, struct xfs_trans_res *resv,
struct xfs_dquot *udqp, struct xfs_dquot *gdqp,
struct xfs_dquot *pdqp, unsigned int dblocks,