linux/fs/xfs/scrub/rmap.c
Dave Chinner 0800169e3e xfs: Pre-calculate per-AG agbno geometry
There is a lot of overhead in functions like xfs_verify_agbno() that
repeatedly calculate the geometry limits of an AG. These can be
pre-calculated as they are static and the verification context has
a per-ag context it can quickly reference.

In the case of xfs_verify_agbno(), we now always have a perag
context handy, so we can store the AG length and the minimum valid
block in the AG in the perag. This means we don't have to calculate
it on every call and it can be inlined in callers if we move it
to xfs_ag.h.
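
As an illustration (not the verbatim patch), the per-ag variant ends
up as roughly the following inline helper in xfs_ag.h, assuming the
pre-calculated values are stored in the perag as pag->block_count and
pag->min_block:

	/* Bounds check an agbno against the pre-computed perag geometry. */
	static inline bool
	xfs_verify_agbno(struct xfs_perag *pag, xfs_agblock_t agbno)
	{
		if (agbno >= pag->block_count)	/* past the end of the AG? */
			return false;
		if (agbno <= pag->min_block)	/* inside the static AG headers? */
			return false;
		return true;
	}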

Move xfs_ag_block_count() to xfs_ag.c because it's really a
per-ag function and not an XFS type function. We need a little
bit of rework that is specific to xfs_initialise_perag() to allow
growfs to calculate the new perag sizes before we've updated the
primary superblock during the grow (chicken/egg situation).

Note that we leave the original xfs_verify_agbno() in place in
xfs_types.c as a static function, as other callers in that file do
not have per-ag contexts and so still need to go the long way. It has
been renamed to xfs_verify_agno_agbno() to indicate that it takes both
an agno and an agbno, differentiating it from the new function.
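
For reference, a sketch of that renamed static helper, assuming it
keeps the existing long-way calculation (this is a sketch, not the
verbatim patch):

	/* Same check, but computing the AG geometry from (mp, agno) on each call. */
	static bool
	xfs_verify_agno_agbno(struct xfs_mount *mp, xfs_agnumber_t agno,
			xfs_agblock_t agbno)
	{
		xfs_agblock_t	eoag;

		eoag = xfs_ag_block_count(mp, agno);
		if (agbno >= eoag)
			return false;
		if (agbno <= XFS_AGFL_BLOCK(mp))
			return false;
		return true;
	}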

Future commits will make similar changes for other per-ag geometry
validation functions.

Further:

$ size --totals fs/xfs/built-in.a
	   text    data     bss     dec     hex filename
before	1483006	 329588	    572	1813166	 1baaae	(TOTALS)
after	1482185	 329588	    572	1812345	 1ba779	(TOTALS)

This rework reduces the binary size by ~820 bytes, indicating that
much less work is being done to bounds check the agbno values against
the per-ag geometry information.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
2022-07-07 19:13:02 +10:00

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "xfs_ag.h"
/*
* Set us up to scrub reverse mapping btrees.
*/
int
xchk_setup_ag_rmapbt(
struct xfs_scrub *sc)
{
return xchk_setup_ag_btree(sc, false);
}
/* Reverse-mapping scrubber. */

/* Cross-reference a rmap against the refcount btree. */
STATIC void
xchk_rmapbt_xref_refc(
	struct xfs_scrub	*sc,
	struct xfs_rmap_irec	*irec)
{
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	bool			non_inode;
	bool			is_bmbt;
	bool			is_attr;
	bool			is_unwritten;
	int			error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	non_inode = XFS_RMAP_NON_INODE_OWNER(irec->rm_owner);
	is_bmbt = irec->rm_flags & XFS_RMAP_BMBT_BLOCK;
	is_attr = irec->rm_flags & XFS_RMAP_ATTR_FORK;
	is_unwritten = irec->rm_flags & XFS_RMAP_UNWRITTEN;

	/* If this is shared, must be a data fork extent. */
	error = xfs_refcount_find_shared(sc->sa.refc_cur, irec->rm_startblock,
			irec->rm_blockcount, &fbno, &flen, false);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (flen != 0 && (non_inode || is_attr || is_bmbt || is_unwritten))
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_rmapbt_xref(
	struct xfs_scrub	*sc,
	struct xfs_rmap_irec	*irec)
{
	xfs_agblock_t		agbno = irec->rm_startblock;
	xfs_extlen_t		len = irec->rm_blockcount;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, len);
	if (irec->rm_owner == XFS_RMAP_OWN_INODES)
		xchk_xref_is_inode_chunk(sc, agbno, len);
	else
		xchk_xref_is_not_inode_chunk(sc, agbno, len);
	if (irec->rm_owner == XFS_RMAP_OWN_COW)
		xchk_xref_is_cow_staging(sc, irec->rm_startblock,
				irec->rm_blockcount);
	else
		xchk_rmapbt_xref_refc(sc, irec);
}

/* Scrub an rmapbt record. */
STATIC int
xchk_rmapbt_rec(
	struct xchk_btree	*bs,
	const union xfs_btree_rec *rec)
{
	struct xfs_mount	*mp = bs->cur->bc_mp;
	struct xfs_rmap_irec	irec;
	struct xfs_perag	*pag = bs->cur->bc_ag.pag;
	bool			non_inode;
	bool			is_unwritten;
	bool			is_bmbt;
	bool			is_attr;
	int			error;

	error = xfs_rmap_btrec_to_irec(rec, &irec);
	if (!xchk_btree_process_error(bs->sc, bs->cur, 0, &error))
		goto out;

	/* Check extent. */
	if (irec.rm_startblock + irec.rm_blockcount <= irec.rm_startblock)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (irec.rm_owner == XFS_RMAP_OWN_FS) {
		/*
		 * xfs_verify_agbno returns false for static fs metadata.
		 * Since that only exists at the start of the AG, validate
		 * that by hand.
		 */
		if (irec.rm_startblock != 0 ||
		    irec.rm_blockcount != XFS_AGFL_BLOCK(mp) + 1)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	} else {
		/*
		 * Otherwise we must point somewhere past the static metadata
		 * but before the end of the FS.  Run the regular check.
		 */
		if (!xfs_verify_agbno(pag, irec.rm_startblock) ||
		    !xfs_verify_agbno(pag, irec.rm_startblock +
				irec.rm_blockcount - 1))
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	}

	/* Check flags. */
	non_inode = XFS_RMAP_NON_INODE_OWNER(irec.rm_owner);
	is_bmbt = irec.rm_flags & XFS_RMAP_BMBT_BLOCK;
	is_attr = irec.rm_flags & XFS_RMAP_ATTR_FORK;
	is_unwritten = irec.rm_flags & XFS_RMAP_UNWRITTEN;

	if (is_bmbt && irec.rm_offset != 0)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (non_inode && irec.rm_offset != 0)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (is_unwritten && (is_bmbt || non_inode || is_attr))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (non_inode && (is_bmbt || is_unwritten || is_attr))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (!non_inode) {
		if (!xfs_verify_ino(mp, irec.rm_owner))
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	} else {
		/* Non-inode owner within the magic values? */
		if (irec.rm_owner <= XFS_RMAP_OWN_MIN ||
		    irec.rm_owner > XFS_RMAP_OWN_FS)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	}

	xchk_rmapbt_xref(bs->sc, &irec);
out:
	return error;
}

/* Scrub the rmap btree for some AG. */
int
xchk_rmapbt(
	struct xfs_scrub	*sc)
{
	return xchk_btree(sc, sc->sa.rmap_cur, xchk_rmapbt_rec,
			&XFS_RMAP_OINFO_AG, NULL);
}

/* xref check that the extent is owned by a given owner */
static inline void
xchk_xref_check_owner(
	struct xfs_scrub	*sc,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	const struct xfs_owner_info *oinfo,
	bool			should_have_rmap)
{
	bool			has_rmap;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_record_exists(sc->sa.rmap_cur, bno, len, oinfo,
			&has_rmap);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (has_rmap != should_have_rmap)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* xref check that the extent is owned by a given owner */
void
xchk_xref_is_owned_by(
	struct xfs_scrub	*sc,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	const struct xfs_owner_info *oinfo)
{
	xchk_xref_check_owner(sc, bno, len, oinfo, true);
}

/* xref check that the extent is not owned by a given owner */
void
xchk_xref_is_not_owned_by(
	struct xfs_scrub	*sc,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	const struct xfs_owner_info *oinfo)
{
	xchk_xref_check_owner(sc, bno, len, oinfo, false);
}

/* xref check that the extent has no reverse mapping at all */
void
xchk_xref_has_no_owner(
	struct xfs_scrub	*sc,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	bool			has_rmap;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_has_record(sc->sa.rmap_cur, bno, len, &has_rmap);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (has_rmap)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}