Merge tag 'perag-xarray-6.13_2024-11-05' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux into staging-merge

xfs: convert perag to use xarrays [v5.5 01/10]

Convert the xfs_mount perag tree to use an xarray instead of a radix
tree.  There should be no functional changes here.

With a bit of luck, this should all go splendidly.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Committed by Carlos Maiolino, 2024-11-12 10:57:32 +01:00
commit 131ffe5e69
44 changed files with 458 additions and 530 deletions
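
For readers who have not used both interfaces, the heart of the conversion is swapping a radix tree, which needs an external lock and an explicit preload step for insertions, for an xarray, which embeds its own lock and allocates internally. Below is a minimal sketch of that general pattern, not the code from this series: struct demo_mount_old/demo_mount_new, m_items and m_items_lock are invented names, and only the radix-tree and xarray calls themselves are real kernel APIs.

/* Illustrative sketch only; not the code from this series. */
#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

struct demo_item {
        unsigned long           index;
};

/* Old pattern: a radix tree guarded by an external lock, with preload. */
struct demo_mount_old {
        struct radix_tree_root  m_items;        /* INIT_RADIX_TREE(..., GFP_ATOMIC) */
        spinlock_t              m_items_lock;
};

static int
demo_insert_old(
        struct demo_mount_old   *mp,
        struct demo_item        *item)
{
        int                     error;

        error = radix_tree_preload(GFP_KERNEL);
        if (error)
                return error;
        spin_lock(&mp->m_items_lock);
        error = radix_tree_insert(&mp->m_items, item->index, item);
        spin_unlock(&mp->m_items_lock);
        radix_tree_preload_end();
        return error;           /* -EEXIST if the slot was already occupied */
}

/* New pattern: the xarray embeds its own lock and allocates internally. */
struct demo_mount_new {
        struct xarray           m_items;        /* xa_init(&mp->m_items) */
};

static int
demo_insert_new(
        struct demo_mount_new   *mp,
        struct demo_item        *item)
{
        /* xa_insert() returns -EBUSY if the index is already populated. */
        return xa_insert(&mp->m_items, item->index, item, GFP_KERNEL);
}

static struct demo_item *
demo_lookup_new(
        struct demo_mount_new   *mp,
        unsigned long           index)
{
        /* Like radix_tree_lookup(), xa_load() is safe under RCU. */
        return xa_load(&mp->m_items, index);
}

static struct demo_item *
demo_remove_new(
        struct demo_mount_new   *mp,
        unsigned long           index)
{
        /* Returns the entry that was stored at @index, or NULL. */
        return xa_erase(&mp->m_items, index);
}

As the xfs_perag_alloc() and xfs_initialize_perag() hunks below show, the series relies on xa_insert() returning -EBUSY for an already-populated index (treated as a should-never-happen case via WARN_ON_ONCE) and on xa_erase() for teardown.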


@ -107,8 +107,7 @@ xfs_perag_rele(
struct xfs_perag *pag)
{
trace_xfs_perag_rele(pag, _RET_IP_);
if (atomic_dec_and_test(&pag->pag_active_ref))
wake_up(&pag->pag_active_wq);
atomic_dec(&pag->pag_active_ref);
}
/*
@ -273,6 +272,10 @@ xfs_agino_range(
return __xfs_agino_range(mp, xfs_ag_block_count(mp, agno), first, last);
}
/*
* Update the perag of the previous tail AG if it has been changed during
* recovery (i.e. recovery of a growfs).
*/
int
xfs_update_last_ag_size(
struct xfs_mount *mp,
@ -290,80 +293,92 @@ xfs_update_last_ag_size(
return 0;
}
static int
xfs_perag_alloc(
struct xfs_mount *mp,
xfs_agnumber_t index,
xfs_agnumber_t agcount,
xfs_rfsblock_t dblocks)
{
struct xfs_perag *pag;
int error;
pag = kzalloc(sizeof(*pag), GFP_KERNEL);
if (!pag)
return -ENOMEM;
#ifdef __KERNEL__
/* Place kernel structure only init below this point. */
spin_lock_init(&pag->pag_ici_lock);
spin_lock_init(&pag->pagb_lock);
spin_lock_init(&pag->pag_state_lock);
INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
xfs_defer_drain_init(&pag->pag_intents_drain);
init_waitqueue_head(&pag->pagb_wait);
pag->pagb_tree = RB_ROOT;
xfs_hooks_init(&pag->pag_rmap_update_hooks);
#endif /* __KERNEL__ */
error = xfs_buf_cache_init(&pag->pag_bcache);
if (error)
goto out_defer_drain_free;
/*
* Pre-calculated geometry
*/
pag->block_count = __xfs_ag_block_count(mp, index, agcount, dblocks);
pag->min_block = XFS_AGFL_BLOCK(mp);
__xfs_agino_range(mp, pag->block_count, &pag->agino_min,
&pag->agino_max);
pag->pag_agno = index;
pag->pag_mount = mp;
/* Active ref owned by mount indicates AG is online. */
atomic_set(&pag->pag_active_ref, 1);
error = xa_insert(&mp->m_perags, index, pag, GFP_KERNEL);
if (error) {
WARN_ON_ONCE(error == -EBUSY);
goto out_buf_cache_destroy;
}
return 0;
out_buf_cache_destroy:
xfs_buf_cache_destroy(&pag->pag_bcache);
out_defer_drain_free:
xfs_defer_drain_free(&pag->pag_intents_drain);
kfree(pag);
return error;
}
int
xfs_initialize_perag(
struct xfs_mount *mp,
xfs_agnumber_t old_agcount,
xfs_agnumber_t orig_agcount,
xfs_agnumber_t new_agcount,
xfs_rfsblock_t dblocks,
xfs_agnumber_t *maxagi)
{
struct xfs_perag *pag;
xfs_agnumber_t index;
int error;
for (index = old_agcount; index < new_agcount; index++) {
pag = kzalloc(sizeof(*pag), GFP_KERNEL);
if (!pag) {
error = -ENOMEM;
goto out_unwind_new_pags;
}
pag->pag_agno = index;
pag->pag_mount = mp;
if (orig_agcount >= new_agcount)
return 0;
error = xa_insert(&mp->m_perags, index, pag, GFP_KERNEL);
if (error) {
WARN_ON_ONCE(error == -EBUSY);
goto out_free_pag;
}
#ifdef __KERNEL__
/* Place kernel structure only init below this point. */
spin_lock_init(&pag->pag_ici_lock);
spin_lock_init(&pag->pagb_lock);
spin_lock_init(&pag->pag_state_lock);
INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
xfs_defer_drain_init(&pag->pag_intents_drain);
init_waitqueue_head(&pag->pagb_wait);
init_waitqueue_head(&pag->pag_active_wq);
pag->pagb_count = 0;
pag->pagb_tree = RB_ROOT;
xfs_hooks_init(&pag->pag_rmap_update_hooks);
#endif /* __KERNEL__ */
error = xfs_buf_cache_init(&pag->pag_bcache);
for (index = orig_agcount; index < new_agcount; index++) {
error = xfs_perag_alloc(mp, index, new_agcount, dblocks);
if (error)
goto out_remove_pag;
/* Active ref owned by mount indicates AG is online. */
atomic_set(&pag->pag_active_ref, 1);
/*
* Pre-calculated geometry
*/
pag->block_count = __xfs_ag_block_count(mp, index, new_agcount,
dblocks);
pag->min_block = XFS_AGFL_BLOCK(mp);
__xfs_agino_range(mp, pag->block_count, &pag->agino_min,
&pag->agino_max);
goto out_unwind_new_pags;
}
index = xfs_set_inode_alloc(mp, new_agcount);
if (maxagi)
*maxagi = index;
*maxagi = xfs_set_inode_alloc(mp, new_agcount);
mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
return 0;
out_remove_pag:
xfs_defer_drain_free(&pag->pag_intents_drain);
pag = xa_erase(&mp->m_perags, index);
out_free_pag:
kfree(pag);
out_unwind_new_pags:
xfs_free_perag_range(mp, old_agcount, index);
xfs_free_perag_range(mp, orig_agcount, index);
return error;
}
@ -872,7 +887,7 @@ xfs_ag_shrink_space(
/* internal log shouldn't also show up in the free space btrees */
error = xfs_alloc_vextent_exact_bno(&args,
XFS_AGB_TO_FSB(mp, pag->pag_agno, aglen - delta));
xfs_agbno_to_fsb(pag, aglen - delta));
if (!error && args.agbno == NULLAGBLOCK)
error = -ENOSPC;


@ -34,7 +34,6 @@ struct xfs_perag {
xfs_agnumber_t pag_agno; /* AG this structure belongs to */
atomic_t pag_ref; /* passive reference count */
atomic_t pag_active_ref; /* active reference count */
wait_queue_head_t pag_active_wq;/* woken active_ref falls to zero */
unsigned long pag_opstate;
uint8_t pagf_bno_level; /* # of levels in bno btree */
uint8_t pagf_cnt_level; /* # of levels in cnt btree */
@ -55,7 +54,6 @@ struct xfs_perag {
xfs_agino_t pagl_leftrec;
xfs_agino_t pagl_rightrec;
int pagb_count; /* pagb slots in use */
uint8_t pagf_refcount_level; /* recount btree height */
/* Blocks reserved for all kinds of metadata. */
@ -144,8 +142,8 @@ __XFS_AG_OPSTATE(prefers_metadata, PREFERS_METADATA)
__XFS_AG_OPSTATE(allows_inodes, ALLOWS_INODES)
__XFS_AG_OPSTATE(agfl_needs_reset, AGFL_NEEDS_RESET)
int xfs_initialize_perag(struct xfs_mount *mp, xfs_agnumber_t old_agcount,
xfs_agnumber_t agcount, xfs_rfsblock_t dcount,
int xfs_initialize_perag(struct xfs_mount *mp, xfs_agnumber_t orig_agcount,
xfs_agnumber_t new_agcount, xfs_rfsblock_t dcount,
xfs_agnumber_t *maxagi);
void xfs_free_perag_range(struct xfs_mount *mp, xfs_agnumber_t first_agno,
xfs_agnumber_t end_agno);
@ -332,4 +330,28 @@ int xfs_ag_extend_space(struct xfs_perag *pag, struct xfs_trans *tp,
xfs_extlen_t len);
int xfs_ag_get_geometry(struct xfs_perag *pag, struct xfs_ag_geometry *ageo);
static inline xfs_fsblock_t
xfs_agbno_to_fsb(
struct xfs_perag *pag,
xfs_agblock_t agbno)
{
return XFS_AGB_TO_FSB(pag->pag_mount, pag->pag_agno, agbno);
}
static inline xfs_daddr_t
xfs_agbno_to_daddr(
struct xfs_perag *pag,
xfs_agblock_t agbno)
{
return XFS_AGB_TO_DADDR(pag->pag_mount, pag->pag_agno, agbno);
}
static inline xfs_ino_t
xfs_agino_to_ino(
struct xfs_perag *pag,
xfs_agino_t agino)
{
return XFS_AGINO_TO_INO(pag->pag_mount, pag->pag_agno, agino);
}
#endif /* __LIBXFS_AG_H */
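
The three inline helpers added above are thin wrappers around the existing XFS_AGB_TO_FSB, XFS_AGB_TO_DADDR and XFS_AGINO_TO_INO macros; callers that already hold a perag no longer need to pass the mount and AG number separately. A hypothetical before/after caller, purely for illustration (the demo_* function names are invented):

/* Hypothetical example; not a function from this series. */
static xfs_fsblock_t
demo_agfl_fsb_old(
        struct xfs_mount        *mp,
        struct xfs_perag        *pag)
{
        /* Old form: spell out the mount and AG number at every call site. */
        return XFS_AGB_TO_FSB(mp, pag->pag_agno, XFS_AGFL_BLOCK(mp));
}

static xfs_fsblock_t
demo_agfl_fsb_new(
        struct xfs_perag        *pag)
{
        /* New form: the perag already knows pag_mount and pag_agno. */
        return xfs_agbno_to_fsb(pag, XFS_AGFL_BLOCK(pag->pag_mount));
}

Most of the call-site churn in the remaining hunks is exactly this substitution.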


@ -206,8 +206,7 @@ __xfs_ag_resv_init(
else
error = xfs_dec_fdblocks(mp, hidden_space, true);
if (error) {
trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
error, _RET_IP_);
trace_xfs_ag_resv_init_error(pag, error, _RET_IP_);
xfs_warn(mp,
"Per-AG reservation for AG %u failed. Filesystem may run out of space.",
pag->pag_agno);


@ -1252,14 +1252,14 @@ xfs_alloc_ag_vextent_small(
if (fbno == NULLAGBLOCK)
goto out;
xfs_extent_busy_reuse(args->mp, args->pag, fbno, 1,
xfs_extent_busy_reuse(args->pag, fbno, 1,
(args->datatype & XFS_ALLOC_NOBUSY));
if (args->datatype & XFS_ALLOC_USERDATA) {
struct xfs_buf *bp;
error = xfs_trans_get_buf(args->tp, args->mp->m_ddev_targp,
XFS_AGB_TO_DADDR(args->mp, args->agno, fbno),
xfs_agbno_to_daddr(args->pag, fbno),
args->mp->m_bsize, 0, &bp);
if (error)
goto error;
@ -2037,7 +2037,6 @@ int
xfs_free_ag_extent(
struct xfs_trans *tp,
struct xfs_buf *agbp,
xfs_agnumber_t agno,
xfs_agblock_t bno,
xfs_extlen_t len,
const struct xfs_owner_info *oinfo,
@ -2358,19 +2357,19 @@ xfs_free_ag_extent(
* Update the freespace totals in the ag and superblock.
*/
error = xfs_alloc_update_counters(tp, agbp, len);
xfs_ag_resv_free_extent(agbp->b_pag, type, tp, len);
xfs_ag_resv_free_extent(pag, type, tp, len);
if (error)
goto error0;
XFS_STATS_INC(mp, xs_freex);
XFS_STATS_ADD(mp, xs_freeb, len);
trace_xfs_free_extent(mp, agno, bno, len, type, haveleft, haveright);
trace_xfs_free_extent(pag, bno, len, type, haveleft, haveright);
return 0;
error0:
trace_xfs_free_extent(mp, agno, bno, len, type, -1, -1);
trace_xfs_free_extent(pag, bno, len, type, -1, -1);
if (bno_cur)
xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
if (cnt_cur)
@ -2934,9 +2933,8 @@ xfs_alloc_fix_freelist(
* Deferring the free disconnects freeing up the AGFL slot from
* freeing the block.
*/
error = xfs_free_extent_later(tp,
XFS_AGB_TO_FSB(mp, args->agno, bno), 1,
&targs.oinfo, XFS_AG_RESV_AGFL, 0);
error = xfs_free_extent_later(tp, xfs_agbno_to_fsb(pag, bno),
1, &targs.oinfo, XFS_AG_RESV_AGFL, 0);
if (error)
goto out_agbp_relse;
}
@ -3360,7 +3358,7 @@ xfs_read_agf(
struct xfs_mount *mp = pag->pag_mount;
int error;
trace_xfs_read_agf(pag->pag_mount, pag->pag_agno);
trace_xfs_read_agf(pag);
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGF_DADDR(mp)),
@ -3391,7 +3389,7 @@ xfs_alloc_read_agf(
int error;
int allocbt_blks;
trace_xfs_alloc_read_agf(pag->pag_mount, pag->pag_agno);
trace_xfs_alloc_read_agf(pag);
/* We don't support trylock when freeing. */
ASSERT((flags & (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK)) !=
@ -3595,7 +3593,7 @@ xfs_alloc_vextent_finish(
goto out_drop_perag;
}
args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
args->fsbno = xfs_agbno_to_fsb(args->pag, args->agbno);
ASSERT(args->len >= args->minlen);
ASSERT(args->len <= args->maxlen);
@ -3616,7 +3614,7 @@ xfs_alloc_vextent_finish(
if (error)
goto out_drop_perag;
ASSERT(!xfs_extent_busy_search(mp, args->pag, args->agbno,
ASSERT(!xfs_extent_busy_search(args->pag, args->agbno,
args->len));
}
@ -3647,7 +3645,6 @@ xfs_alloc_vextent_this_ag(
struct xfs_alloc_arg *args,
xfs_agnumber_t agno)
{
struct xfs_mount *mp = args->mp;
xfs_agnumber_t minimum_agno;
uint32_t alloc_flags = 0;
int error;
@ -3660,8 +3657,8 @@ xfs_alloc_vextent_this_ag(
trace_xfs_alloc_vextent_this_ag(args);
error = xfs_alloc_vextent_check_args(args, XFS_AGB_TO_FSB(mp, agno, 0),
&minimum_agno);
error = xfs_alloc_vextent_check_args(args,
xfs_agbno_to_fsb(args->pag, 0), &minimum_agno);
if (error) {
if (error == -ENOSPC)
return 0;
@ -4010,8 +4007,7 @@ __xfs_free_extent(
goto err_release;
}
error = xfs_free_ag_extent(tp, agbp, pag->pag_agno, agbno, len, oinfo,
type);
error = xfs_free_ag_extent(tp, agbp, agbno, len, oinfo, type);
if (error)
goto err_release;


@ -79,9 +79,8 @@ int xfs_alloc_put_freelist(struct xfs_perag *pag, struct xfs_trans *tp,
struct xfs_buf *agfbp, struct xfs_buf *agflbp,
xfs_agblock_t bno, int btreeblk);
int xfs_free_ag_extent(struct xfs_trans *tp, struct xfs_buf *agbp,
xfs_agnumber_t agno, xfs_agblock_t bno,
xfs_extlen_t len, const struct xfs_owner_info *oinfo,
enum xfs_ag_resv_type type);
xfs_agblock_t bno, xfs_extlen_t len,
const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type);
/*
* Compute and fill in value of m_alloc_maxlevels.


@ -86,7 +86,7 @@ xfs_allocbt_alloc_block(
}
atomic64_inc(&cur->bc_mp->m_allocbt_blks);
xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.pag, bno, 1, false);
xfs_extent_busy_reuse(cur->bc_ag.pag, bno, 1, false);
new->s = cpu_to_be32(bno);


@ -1017,21 +1017,20 @@ xfs_btree_readahead_agblock(
struct xfs_btree_block *block)
{
struct xfs_mount *mp = cur->bc_mp;
xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;
xfs_agblock_t left = be32_to_cpu(block->bb_u.s.bb_leftsib);
xfs_agblock_t right = be32_to_cpu(block->bb_u.s.bb_rightsib);
int rval = 0;
if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) {
xfs_buf_readahead(mp->m_ddev_targp,
XFS_AGB_TO_DADDR(mp, agno, left),
xfs_agbno_to_daddr(cur->bc_ag.pag, left),
mp->m_bsize, cur->bc_ops->buf_ops);
rval++;
}
if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) {
xfs_buf_readahead(mp->m_ddev_targp,
XFS_AGB_TO_DADDR(mp, agno, right),
xfs_agbno_to_daddr(cur->bc_ag.pag, right),
mp->m_bsize, cur->bc_ops->buf_ops);
rval++;
}
@ -1091,7 +1090,7 @@ xfs_btree_ptr_to_daddr(
switch (cur->bc_ops->type) {
case XFS_BTREE_TYPE_AG:
*daddr = XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_ag.pag->pag_agno,
*daddr = xfs_agbno_to_daddr(cur->bc_ag.pag,
be32_to_cpu(ptr->s));
break;
case XFS_BTREE_TYPE_INODE:


@ -606,15 +606,12 @@ xfs_inobt_insert_sprec(
goto error;
}
trace_xfs_irec_merge_pre(mp, pag->pag_agno, rec.ir_startino,
rec.ir_holemask, nrec->ir_startino,
nrec->ir_holemask);
trace_xfs_irec_merge_pre(pag, &rec, nrec);
/* merge to nrec to output the updated record */
__xfs_inobt_rec_merge(nrec, &rec);
trace_xfs_irec_merge_post(mp, pag->pag_agno, nrec->ir_startino,
nrec->ir_holemask);
trace_xfs_irec_merge_post(pag, nrec);
error = xfs_inobt_rec_check_count(mp, nrec);
if (error)
@ -768,8 +765,7 @@ xfs_ialloc_ag_alloc(
/* Allow space for the inode btree to split. */
args.minleft = igeo->inobt_maxlevels;
error = xfs_alloc_vextent_exact_bno(&args,
XFS_AGB_TO_FSB(args.mp, pag->pag_agno,
args.agbno));
xfs_agbno_to_fsb(pag, args.agbno));
if (error)
return error;
@ -811,8 +807,8 @@ xfs_ialloc_ag_alloc(
*/
args.minleft = igeo->inobt_maxlevels;
error = xfs_alloc_vextent_near_bno(&args,
XFS_AGB_TO_FSB(args.mp, pag->pag_agno,
be32_to_cpu(agi->agi_root)));
xfs_agbno_to_fsb(pag,
be32_to_cpu(agi->agi_root)));
if (error)
return error;
}
@ -824,8 +820,8 @@ xfs_ialloc_ag_alloc(
if (isaligned && args.fsbno == NULLFSBLOCK) {
args.alignment = igeo->cluster_align;
error = xfs_alloc_vextent_near_bno(&args,
XFS_AGB_TO_FSB(args.mp, pag->pag_agno,
be32_to_cpu(agi->agi_root)));
xfs_agbno_to_fsb(pag,
be32_to_cpu(agi->agi_root)));
if (error)
return error;
}
@ -860,8 +856,8 @@ sparse_alloc:
igeo->ialloc_blks;
error = xfs_alloc_vextent_near_bno(&args,
XFS_AGB_TO_FSB(args.mp, pag->pag_agno,
be32_to_cpu(agi->agi_root)));
xfs_agbno_to_fsb(pag,
be32_to_cpu(agi->agi_root)));
if (error)
return error;
@ -915,8 +911,7 @@ sparse_alloc:
if (error == -EFSCORRUPTED) {
xfs_alert(args.mp,
"invalid sparse inode record: ino 0x%llx holemask 0x%x count %u",
XFS_AGINO_TO_INO(args.mp, pag->pag_agno,
rec.ir_startino),
xfs_agino_to_ino(pag, rec.ir_startino),
rec.ir_holemask, rec.ir_count);
xfs_force_shutdown(args.mp, SHUTDOWN_CORRUPT_INCORE);
}
@ -1335,7 +1330,7 @@ alloc_inode:
ASSERT(offset < XFS_INODES_PER_CHUNK);
ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
XFS_INODES_PER_CHUNK) == 0);
ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset);
ino = xfs_agino_to_ino(pag, rec.ir_startino + offset);
if (xfs_ag_has_sickness(pag, XFS_SICK_AG_INODES)) {
error = xfs_dialloc_check_ino(pag, tp, ino);
@ -1616,7 +1611,7 @@ xfs_dialloc_ag(
ASSERT(offset < XFS_INODES_PER_CHUNK);
ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
XFS_INODES_PER_CHUNK) == 0);
ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset);
ino = xfs_agino_to_ino(pag, rec.ir_startino + offset);
if (xfs_ag_has_sickness(pag, XFS_SICK_AG_INODES)) {
error = xfs_dialloc_check_ino(pag, tp, ino);
@ -1974,7 +1969,7 @@ retry:
static int
xfs_difree_inode_chunk(
struct xfs_trans *tp,
xfs_agnumber_t agno,
struct xfs_perag *pag,
struct xfs_inobt_rec_incore *rec)
{
struct xfs_mount *mp = tp->t_mountp;
@ -1988,8 +1983,7 @@ xfs_difree_inode_chunk(
if (!xfs_inobt_issparse(rec->ir_holemask)) {
/* not sparse, calculate extent info directly */
return xfs_free_extent_later(tp,
XFS_AGB_TO_FSB(mp, agno, sagbno),
return xfs_free_extent_later(tp, xfs_agbno_to_fsb(pag, sagbno),
M_IGEO(mp)->ialloc_blks, &XFS_RMAP_OINFO_INODES,
XFS_AG_RESV_NONE, 0);
}
@ -2035,9 +2029,9 @@ xfs_difree_inode_chunk(
ASSERT(agbno % mp->m_sb.sb_spino_align == 0);
ASSERT(contigblk % mp->m_sb.sb_spino_align == 0);
error = xfs_free_extent_later(tp,
XFS_AGB_TO_FSB(mp, agno, agbno), contigblk,
&XFS_RMAP_OINFO_INODES, XFS_AG_RESV_NONE, 0);
error = xfs_free_extent_later(tp, xfs_agbno_to_fsb(pag, agbno),
contigblk, &XFS_RMAP_OINFO_INODES,
XFS_AG_RESV_NONE, 0);
if (error)
return error;
@ -2124,8 +2118,7 @@ xfs_difree_inobt(
if (!xfs_has_ikeep(mp) && rec.ir_free == XFS_INOBT_ALL_FREE &&
mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
xic->deleted = true;
xic->first_ino = XFS_AGINO_TO_INO(mp, pag->pag_agno,
rec.ir_startino);
xic->first_ino = xfs_agino_to_ino(pag, rec.ir_startino);
xic->alloc = xfs_inobt_irec_to_allocmask(&rec);
/*
@ -2148,7 +2141,7 @@ xfs_difree_inobt(
goto error0;
}
error = xfs_difree_inode_chunk(tp, pag->pag_agno, &rec);
error = xfs_difree_inode_chunk(tp, pag, &rec);
if (error)
goto error0;
} else {
@ -2324,10 +2317,10 @@ xfs_difree(
return -EINVAL;
}
agino = XFS_INO_TO_AGINO(mp, inode);
if (inode != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).",
if (inode != xfs_agino_to_ino(pag, agino)) {
xfs_warn(mp, "%s: inode != xfs_agino_to_ino() (%llu != %llu).",
__func__, (unsigned long long)inode,
(unsigned long long)XFS_AGINO_TO_INO(mp, pag->pag_agno, agino));
(unsigned long long)xfs_agino_to_ino(pag, agino));
ASSERT(0);
return -EINVAL;
}
@ -2458,7 +2451,7 @@ xfs_imap(
agino = XFS_INO_TO_AGINO(mp, ino);
agbno = XFS_AGINO_TO_AGBNO(mp, agino);
if (agbno >= mp->m_sb.sb_agblocks ||
ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
ino != xfs_agino_to_ino(pag, agino)) {
error = -EINVAL;
#ifdef DEBUG
/*
@ -2473,11 +2466,11 @@ xfs_imap(
__func__, (unsigned long long)agbno,
(unsigned long)mp->m_sb.sb_agblocks);
}
if (ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
if (ino != xfs_agino_to_ino(pag, agino)) {
xfs_alert(mp,
"%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)",
"%s: ino (0x%llx) != xfs_agino_to_ino() (0x%llx)",
__func__, ino,
XFS_AGINO_TO_INO(mp, pag->pag_agno, agino));
xfs_agino_to_ino(pag, agino));
}
xfs_stack_trace();
#endif /* DEBUG */
@ -2507,7 +2500,7 @@ xfs_imap(
offset = XFS_INO_TO_OFFSET(mp, ino);
ASSERT(offset < mp->m_sb.sb_inopblock);
imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, agbno);
imap->im_blkno = xfs_agbno_to_daddr(pag, agbno);
imap->im_len = XFS_FSB_TO_BB(mp, 1);
imap->im_boffset = (unsigned short)(offset <<
mp->m_sb.sb_inodelog);
@ -2537,7 +2530,7 @@ out_map:
offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
XFS_INO_TO_OFFSET(mp, ino);
imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, cluster_agbno);
imap->im_blkno = xfs_agbno_to_daddr(pag, cluster_agbno);
imap->im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog);
@ -2736,7 +2729,7 @@ xfs_read_agi(
struct xfs_mount *mp = pag->pag_mount;
int error;
trace_xfs_read_agi(pag->pag_mount, pag->pag_agno);
trace_xfs_read_agi(pag);
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGI_DADDR(mp)),
@ -2767,7 +2760,7 @@ xfs_ialloc_read_agi(
struct xfs_agi *agi;
int error;
trace_xfs_ialloc_read_agi(pag->pag_mount, pag->pag_agno);
trace_xfs_ialloc_read_agi(pag);
error = xfs_read_agi(pag, tp,
(flags & XFS_IALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,


@ -120,7 +120,7 @@ __xfs_inobt_alloc_block(
args.resv = resv;
error = xfs_alloc_vextent_near_bno(&args,
XFS_AGB_TO_FSB(args.mp, args.pag->pag_agno, sbno));
xfs_agbno_to_fsb(args.pag, sbno));
if (error)
return error;


@ -442,8 +442,8 @@ xfs_iunlink_update_bucket(
ASSERT(xfs_verify_agino_or_null(pag, new_agino));
old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index,
old_value, new_agino);
trace_xfs_iunlink_update_bucket(pag, bucket_index, old_value,
new_agino);
/*
* We should never find the head of the list already set to the value


@ -1154,8 +1154,7 @@ xfs_refcount_adjust_extents(
goto out_error;
}
} else {
fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
cur->bc_ag.pag->pag_agno,
fsbno = xfs_agbno_to_fsb(cur->bc_ag.pag,
tmp.rc_startblock);
error = xfs_free_extent_later(cur->bc_tp, fsbno,
tmp.rc_blockcount, NULL,
@ -1217,8 +1216,7 @@ xfs_refcount_adjust_extents(
}
goto advloop;
} else {
fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
cur->bc_ag.pag->pag_agno,
fsbno = xfs_agbno_to_fsb(cur->bc_ag.pag,
ext.rc_startblock);
error = xfs_free_extent_later(cur->bc_tp, fsbno,
ext.rc_blockcount, NULL,
@ -1320,7 +1318,7 @@ xfs_refcount_continue_op(
return -EFSCORRUPTED;
}
ri->ri_startblock = XFS_AGB_TO_FSB(mp, pag->pag_agno, new_agbno);
ri->ri_startblock = xfs_agbno_to_fsb(pag, new_agbno);
ASSERT(xfs_verify_fsbext(mp, ri->ri_startblock, ri->ri_blockcount));
ASSERT(pag->pag_agno == XFS_FSB_TO_AGNO(mp, ri->ri_startblock));
@ -1956,8 +1954,7 @@ xfs_refcount_recover_cow_leftovers(
goto out_free;
/* Free the orphan record */
fsb = XFS_AGB_TO_FSB(mp, pag->pag_agno,
rr->rr_rrec.rc_startblock);
fsb = xfs_agbno_to_fsb(pag, rr->rr_rrec.rc_startblock);
xfs_refcount_free_cow_extent(tp, fsb,
rr->rr_rrec.rc_blockcount);


@ -74,8 +74,7 @@ xfs_refcountbt_alloc_block(
args.resv = XFS_AG_RESV_METADATA;
error = xfs_alloc_vextent_near_bno(&args,
XFS_AGB_TO_FSB(args.mp, args.pag->pag_agno,
xfs_refc_block(args.mp)));
xfs_agbno_to_fsb(args.pag, xfs_refc_block(args.mp)));
if (error)
goto out_error;
if (args.fsbno == NULLFSBLOCK) {


@ -102,7 +102,7 @@ xfs_rmapbt_alloc_block(
return 0;
}
xfs_extent_busy_reuse(cur->bc_mp, pag, bno, 1, false);
xfs_extent_busy_reuse(pag, bno, 1, false);
new->s = cpu_to_be32(bno);
be32_add_cpu(&agf->agf_rmap_blocks, 1);


@ -1038,12 +1038,10 @@ xrep_iunlink_reload_next(
{
struct xfs_scrub *sc = ragi->sc;
struct xfs_inode *ip;
xfs_ino_t ino;
xfs_agino_t ret = NULLAGINO;
int error;
ino = XFS_AGINO_TO_INO(sc->mp, sc->sa.pag->pag_agno, agino);
error = xchk_iget(ragi->sc, ino, &ip);
error = xchk_iget(ragi->sc, xfs_agino_to_ino(sc->sa.pag, agino), &ip);
if (error)
return ret;
@ -1278,9 +1276,7 @@ xrep_iunlink_mark_ondisk_rec(
* on because we haven't actually scrubbed the inobt or the
* inodes yet.
*/
error = xchk_iget(ragi->sc,
XFS_AGINO_TO_INO(mp, sc->sa.pag->pag_agno,
agino),
error = xchk_iget(ragi->sc, xfs_agino_to_ino(sc->sa.pag, agino),
&ip);
if (error)
continue;
@ -1539,15 +1535,13 @@ xrep_iunlink_relink_next(
ip = xfs_iunlink_lookup(pag, agino);
if (!ip) {
xfs_ino_t ino;
xfs_agino_t prev_agino;
/*
* No inode exists in cache. Load it off the disk so that we
* can reinsert it into the incore unlinked list.
*/
ino = XFS_AGINO_TO_INO(sc->mp, pag->pag_agno, agino);
error = xchk_iget(sc, ino, &ip);
error = xchk_iget(sc, xfs_agino_to_ino(pag, agino), &ip);
if (error)
return -EFSCORRUPTED;
@ -1601,15 +1595,13 @@ xrep_iunlink_relink_prev(
ip = xfs_iunlink_lookup(pag, agino);
if (!ip) {
xfs_ino_t ino;
xfs_agino_t next_agino;
/*
* No inode exists in cache. Load it off the disk so that we
* can reinsert it into the incore unlinked list.
*/
ino = XFS_AGINO_TO_INO(sc->mp, pag->pag_agno, agino);
error = xchk_iget(sc, ino, &ip);
error = xchk_iget(sc, xfs_agino_to_ino(pag, agino), &ip);
if (error)
return -EFSCORRUPTED;


@ -210,7 +210,7 @@ xrep_abt_stash(
if (error)
return error;
trace_xrep_abt_found(sc->mp, sc->sa.pag->pag_agno, &arec);
trace_xrep_abt_found(sc->sa.pag, &arec);
error = xfarray_append(ra->free_records, &arec);
if (error)
@ -484,8 +484,8 @@ xrep_abt_reserve_space(
ASSERT(arec.ar_blockcount <= UINT_MAX);
len = min_t(unsigned int, arec.ar_blockcount, desired);
trace_xrep_newbt_alloc_ag_blocks(sc->mp, sc->sa.pag->pag_agno,
arec.ar_startblock, len, XFS_RMAP_OWN_AG);
trace_xrep_newbt_alloc_ag_blocks(sc->sa.pag, arec.ar_startblock,
len, XFS_RMAP_OWN_AG);
error = xrep_newbt_add_extent(&ra->new_bnobt, sc->sa.pag,
arec.ar_startblock, len);
@ -554,8 +554,8 @@ xrep_abt_dispose_one(
if (free_aglen == 0)
return 0;
trace_xrep_newbt_free_blocks(sc->mp, resv->pag->pag_agno, free_agbno,
free_aglen, ra->new_bnobt.oinfo.oi_owner);
trace_xrep_newbt_free_blocks(resv->pag, free_agbno, free_aglen,
ra->new_bnobt.oinfo.oi_owner);
error = __xfs_free_extent(sc->tp, resv->pag, free_agbno, free_aglen,
&ra->new_bnobt.oinfo, XFS_AG_RESV_IGNORE, true);


@ -600,9 +600,8 @@ xchk_bmap_check_rmap(
if (irec.br_startoff != check_rec.rm_offset)
xchk_fblock_set_corrupt(sc, sbcri->whichfork,
check_rec.rm_offset);
if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
cur->bc_ag.pag->pag_agno,
check_rec.rm_startblock))
if (irec.br_startblock !=
xfs_agbno_to_fsb(cur->bc_ag.pag, check_rec.rm_startblock))
xchk_fblock_set_corrupt(sc, sbcri->whichfork,
check_rec.rm_offset);
if (irec.br_blockcount > check_rec.rm_blockcount)


@ -237,7 +237,6 @@ xrep_bmap_walk_rmap(
void *priv)
{
struct xrep_bmap *rb = priv;
struct xfs_mount *mp = cur->bc_mp;
xfs_fsblock_t fsbno;
int error = 0;
@ -269,8 +268,7 @@ xrep_bmap_walk_rmap(
if ((rec->rm_flags & XFS_RMAP_UNWRITTEN) && !rb->allow_unwritten)
return -EFSCORRUPTED;
fsbno = XFS_AGB_TO_FSB(mp, cur->bc_ag.pag->pag_agno,
rec->rm_startblock);
fsbno = xfs_agbno_to_fsb(cur->bc_ag.pag, rec->rm_startblock);
if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK) {
rb->old_bmbt_block_count += rec->rm_blockcount;


@ -1336,7 +1336,7 @@ xchk_inode_is_allocated(
}
/* reject inode numbers outside existing AGs */
ino = XFS_AGINO_TO_INO(sc->mp, pag->pag_agno, agino);
ino = xfs_agino_to_ino(pag, agino);
if (!xfs_verify_ino(mp, ino))
return -EINVAL;


@ -137,7 +137,6 @@ xrep_cow_mark_shared_staging(
{
struct xrep_cow *xc = priv;
struct xfs_refcount_irec rrec;
xfs_fsblock_t fsbno;
if (!xfs_refcount_check_domain(rec) ||
rec->rc_domain != XFS_REFC_DOMAIN_SHARED)
@ -145,9 +144,9 @@ xrep_cow_mark_shared_staging(
xrep_cow_trim_refcount(xc, &rrec, rec);
fsbno = XFS_AGB_TO_FSB(xc->sc->mp, cur->bc_ag.pag->pag_agno,
rrec.rc_startblock);
return xrep_cow_mark_file_range(xc, fsbno, rrec.rc_blockcount);
return xrep_cow_mark_file_range(xc,
xfs_agbno_to_fsb(cur->bc_ag.pag, rrec.rc_startblock),
rrec.rc_blockcount);
}
/*
@ -178,8 +177,7 @@ xrep_cow_mark_missing_staging(
goto next;
error = xrep_cow_mark_file_range(xc,
XFS_AGB_TO_FSB(xc->sc->mp, cur->bc_ag.pag->pag_agno,
xc->next_bno),
xfs_agbno_to_fsb(cur->bc_ag.pag, xc->next_bno),
rrec.rc_startblock - xc->next_bno);
if (error)
return error;
@ -200,7 +198,6 @@ xrep_cow_mark_missing_staging_rmap(
void *priv)
{
struct xrep_cow *xc = priv;
xfs_fsblock_t fsbno;
xfs_agblock_t rec_bno;
xfs_extlen_t rec_len;
unsigned int adj;
@ -222,8 +219,8 @@ xrep_cow_mark_missing_staging_rmap(
rec_len -= adj;
}
fsbno = XFS_AGB_TO_FSB(xc->sc->mp, cur->bc_ag.pag->pag_agno, rec_bno);
return xrep_cow_mark_file_range(xc, fsbno, rec_len);
return xrep_cow_mark_file_range(xc,
xfs_agbno_to_fsb(cur->bc_ag.pag, rec_bno), rec_len);
}
/*
@ -275,8 +272,7 @@ xrep_cow_find_bad(
if (xc->next_bno < xc->irec_startbno + xc->irec.br_blockcount) {
error = xrep_cow_mark_file_range(xc,
XFS_AGB_TO_FSB(sc->mp, pag->pag_agno,
xc->next_bno),
xfs_agbno_to_fsb(pag, xc->next_bno),
xc->irec_startbno + xc->irec.br_blockcount -
xc->next_bno);
if (error)


@ -303,7 +303,6 @@ xchk_iallocbt_check_cluster_ifree(
unsigned int irec_ino,
struct xfs_dinode *dip)
{
struct xfs_mount *mp = bs->cur->bc_mp;
xfs_ino_t fsino;
xfs_agino_t agino;
bool irec_free;
@ -319,7 +318,7 @@ xchk_iallocbt_check_cluster_ifree(
* the record, compute which fs inode we're talking about.
*/
agino = irec->ir_startino + irec_ino;
fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_ag.pag->pag_agno, agino);
fsino = xfs_agino_to_ino(bs->cur->bc_ag.pag, agino);
irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino));
if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
@ -368,7 +367,6 @@ xchk_iallocbt_check_cluster(
struct xfs_mount *mp = bs->cur->bc_mp;
struct xfs_buf *cluster_bp;
unsigned int nr_inodes;
xfs_agnumber_t agno = bs->cur->bc_ag.pag->pag_agno;
xfs_agblock_t agbno;
unsigned int cluster_index;
uint16_t cluster_mask = 0;
@ -396,7 +394,7 @@ xchk_iallocbt_check_cluster(
* ir_startino can be large enough to make im_boffset nonzero.
*/
ir_holemask = (irec->ir_holemask & cluster_mask);
imap.im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
imap.im_blkno = xfs_agbno_to_daddr(bs->cur->bc_ag.pag, agbno);
imap.im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino) <<
mp->m_sb.sb_inodelog;
@ -407,7 +405,7 @@ xchk_iallocbt_check_cluster(
return 0;
}
trace_xchk_iallocbt_check_cluster(mp, agno, irec->ir_startino,
trace_xchk_iallocbt_check_cluster(bs->cur->bc_ag.pag, irec->ir_startino,
imap.im_blkno, imap.im_len, cluster_base, nr_inodes,
cluster_mask, ir_holemask,
XFS_INO_TO_OFFSET(mp, irec->ir_startino +


@ -146,15 +146,12 @@ xrep_ibt_check_ifree(
struct xfs_scrub *sc = ri->sc;
struct xfs_mount *mp = sc->mp;
struct xfs_dinode *dip;
xfs_ino_t fsino;
xfs_agino_t agino;
xfs_agnumber_t agno = ri->sc->sa.pag->pag_agno;
unsigned int cluster_buf_base;
unsigned int offset;
int error;
agino = cluster_ag_base + cluster_index;
fsino = XFS_AGINO_TO_INO(mp, agno, agino);
/* Inode uncached or half assembled, read disk buffer */
cluster_buf_base = XFS_INO_TO_OFFSET(mp, cluster_ag_base);
@ -165,7 +162,8 @@ xrep_ibt_check_ifree(
if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC)
return -EFSCORRUPTED;
if (dip->di_version >= 3 && be64_to_cpu(dip->di_ino) != fsino)
if (dip->di_version >= 3 &&
be64_to_cpu(dip->di_ino) != xfs_agino_to_ino(ri->sc->sa.pag, agino))
return -EFSCORRUPTED;
/* Will the in-core inode tell us if it's in use? */
@ -194,7 +192,7 @@ xrep_ibt_stash(
if (ri->rie.ir_freecount > 0)
ri->finobt_recs++;
trace_xrep_ibt_found(ri->sc->mp, ri->sc->sa.pag->pag_agno, &ri->rie);
trace_xrep_ibt_found(ri->sc->sa.pag, &ri->rie);
error = xfarray_append(ri->inode_records, &ri->rie);
if (error)
@ -307,7 +305,7 @@ xrep_ibt_process_cluster(
* inobt because imap_to_bp directly maps the buffer without touching
* either inode btree.
*/
imap.im_blkno = XFS_AGB_TO_DADDR(mp, sc->sa.pag->pag_agno, cluster_bno);
imap.im_blkno = xfs_agbno_to_daddr(sc->sa.pag, cluster_bno);
imap.im_len = XFS_FSB_TO_BB(mp, igeo->blocks_per_cluster);
imap.im_boffset = 0;
error = xfs_imap_to_bp(mp, sc->tp, &imap, &cluster_bp);
@ -423,9 +421,7 @@ xrep_ibt_record_inode_blocks(
if (error)
return error;
trace_xrep_ibt_walk_rmap(mp, ri->sc->sa.pag->pag_agno,
rec->rm_startblock, rec->rm_blockcount, rec->rm_owner,
rec->rm_offset, rec->rm_flags);
trace_xrep_ibt_walk_rmap(ri->sc->sa.pag, rec);
/*
* Record the free/hole masks for each inode cluster that could be
@ -634,7 +630,6 @@ xrep_ibt_build_new_trees(
struct xfs_scrub *sc = ri->sc;
struct xfs_btree_cur *ino_cur;
struct xfs_btree_cur *fino_cur = NULL;
xfs_fsblock_t fsbno;
bool need_finobt;
int error;
@ -656,9 +651,8 @@ xrep_ibt_build_new_trees(
*
* Start by setting up the inobt staging cursor.
*/
fsbno = XFS_AGB_TO_FSB(sc->mp, sc->sa.pag->pag_agno,
XFS_IBT_BLOCK(sc->mp));
xrep_newbt_init_ag(&ri->new_inobt, sc, &XFS_RMAP_OINFO_INOBT, fsbno,
xrep_newbt_init_ag(&ri->new_inobt, sc, &XFS_RMAP_OINFO_INOBT,
xfs_agbno_to_fsb(sc->sa.pag, XFS_IBT_BLOCK(sc->mp)),
XFS_AG_RESV_NONE);
ri->new_inobt.bload.claim_block = xrep_ibt_claim_block;
ri->new_inobt.bload.get_records = xrep_ibt_get_records;
@ -677,10 +671,9 @@ xrep_ibt_build_new_trees(
if (sc->mp->m_finobt_nores)
resv = XFS_AG_RESV_NONE;
fsbno = XFS_AGB_TO_FSB(sc->mp, sc->sa.pag->pag_agno,
XFS_FIBT_BLOCK(sc->mp));
xrep_newbt_init_ag(&ri->new_finobt, sc, &XFS_RMAP_OINFO_INOBT,
fsbno, resv);
xfs_agbno_to_fsb(sc->sa.pag, XFS_FIBT_BLOCK(sc->mp)),
resv);
ri->new_finobt.bload.claim_block = xrep_fibt_claim_block;
ri->new_finobt.bload.get_records = xrep_fibt_get_records;


@ -186,11 +186,10 @@ xrep_newbt_add_extent(
xfs_agblock_t agbno,
xfs_extlen_t len)
{
struct xfs_mount *mp = xnr->sc->mp;
struct xfs_alloc_arg args = {
.tp = NULL, /* no autoreap */
.oinfo = xnr->oinfo,
.fsbno = XFS_AGB_TO_FSB(mp, pag->pag_agno, agbno),
.fsbno = xfs_agbno_to_fsb(pag, agbno),
.len = len,
.resv = xnr->resv,
};
@ -210,8 +209,8 @@ xrep_newbt_validate_ag_alloc_hint(
xfs_verify_fsbno(sc->mp, xnr->alloc_hint))
return;
xnr->alloc_hint = XFS_AGB_TO_FSB(sc->mp, sc->sa.pag->pag_agno,
XFS_AGFL_BLOCK(sc->mp) + 1);
xnr->alloc_hint =
xfs_agbno_to_fsb(sc->sa.pag, XFS_AGFL_BLOCK(sc->mp) + 1);
}
/* Allocate disk space for a new per-AG btree. */
@ -251,16 +250,15 @@ xrep_newbt_alloc_ag_blocks(
return -ENOSPC;
agno = XFS_FSB_TO_AGNO(mp, args.fsbno);
trace_xrep_newbt_alloc_ag_blocks(mp, agno,
XFS_FSB_TO_AGBNO(mp, args.fsbno), args.len,
xnr->oinfo.oi_owner);
if (agno != sc->sa.pag->pag_agno) {
ASSERT(agno == sc->sa.pag->pag_agno);
return -EFSCORRUPTED;
}
trace_xrep_newbt_alloc_ag_blocks(sc->sa.pag,
XFS_FSB_TO_AGBNO(mp, args.fsbno), args.len,
xnr->oinfo.oi_owner);
error = xrep_newbt_add_blocks(xnr, sc->sa.pag, &args);
if (error)
return error;
@ -326,16 +324,16 @@ xrep_newbt_alloc_file_blocks(
agno = XFS_FSB_TO_AGNO(mp, args.fsbno);
trace_xrep_newbt_alloc_file_blocks(mp, agno,
XFS_FSB_TO_AGBNO(mp, args.fsbno), args.len,
xnr->oinfo.oi_owner);
pag = xfs_perag_get(mp, agno);
if (!pag) {
ASSERT(0);
return -EFSCORRUPTED;
}
trace_xrep_newbt_alloc_file_blocks(pag,
XFS_FSB_TO_AGBNO(mp, args.fsbno), args.len,
xnr->oinfo.oi_owner);
error = xrep_newbt_add_blocks(xnr, pag, &args);
xfs_perag_put(pag);
if (error)
@ -376,7 +374,6 @@ xrep_newbt_free_extent(
struct xfs_scrub *sc = xnr->sc;
xfs_agblock_t free_agbno = resv->agbno;
xfs_extlen_t free_aglen = resv->len;
xfs_fsblock_t fsbno;
int error;
if (!btree_committed || resv->used == 0) {
@ -385,8 +382,8 @@ xrep_newbt_free_extent(
* space reservation, let the existing EFI free the entire
* space extent.
*/
trace_xrep_newbt_free_blocks(sc->mp, resv->pag->pag_agno,
free_agbno, free_aglen, xnr->oinfo.oi_owner);
trace_xrep_newbt_free_blocks(resv->pag, free_agbno, free_aglen,
xnr->oinfo.oi_owner);
xfs_alloc_commit_autoreap(sc->tp, &resv->autoreap);
return 1;
}
@ -403,8 +400,8 @@ xrep_newbt_free_extent(
if (free_aglen == 0)
return 0;
trace_xrep_newbt_free_blocks(sc->mp, resv->pag->pag_agno, free_agbno,
free_aglen, xnr->oinfo.oi_owner);
trace_xrep_newbt_free_blocks(resv->pag, free_agbno, free_aglen,
xnr->oinfo.oi_owner);
ASSERT(xnr->resv != XFS_AG_RESV_AGFL);
ASSERT(xnr->resv != XFS_AG_RESV_IGNORE);
@ -413,9 +410,9 @@ xrep_newbt_free_extent(
* Use EFIs to free the reservations. This reduces the chance
* that we leak blocks if the system goes down.
*/
fsbno = XFS_AGB_TO_FSB(sc->mp, resv->pag->pag_agno, free_agbno);
error = xfs_free_extent_later(sc->tp, fsbno, free_aglen, &xnr->oinfo,
xnr->resv, XFS_FREE_EXTENT_SKIP_DISCARD);
error = xfs_free_extent_later(sc->tp,
xfs_agbno_to_fsb(resv->pag, free_agbno), free_aglen,
&xnr->oinfo, xnr->resv, XFS_FREE_EXTENT_SKIP_DISCARD);
if (error)
return error;
@ -516,7 +513,6 @@ xrep_newbt_claim_block(
union xfs_btree_ptr *ptr)
{
struct xrep_newbt_resv *resv;
struct xfs_mount *mp = cur->bc_mp;
xfs_agblock_t agbno;
/*
@ -541,12 +537,10 @@ xrep_newbt_claim_block(
if (resv->used == resv->len)
list_move_tail(&resv->list, &xnr->resv_list);
trace_xrep_newbt_claim_block(mp, resv->pag->pag_agno, agbno, 1,
xnr->oinfo.oi_owner);
trace_xrep_newbt_claim_block(resv->pag, agbno, 1, xnr->oinfo.oi_owner);
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
ptr->l = cpu_to_be64(XFS_AGB_TO_FSB(mp, resv->pag->pag_agno,
agbno));
ptr->l = cpu_to_be64(xfs_agbno_to_fsb(resv->pag, agbno));
else
ptr->s = cpu_to_be32(agbno);


@ -263,7 +263,6 @@ xreap_agextent_binval(
struct xfs_scrub *sc = rs->sc;
struct xfs_perag *pag = sc->sa.pag;
struct xfs_mount *mp = sc->mp;
xfs_agnumber_t agno = sc->sa.pag->pag_agno;
xfs_agblock_t agbno_next = agbno + *aglenp;
xfs_agblock_t bno = agbno;
@ -284,7 +283,7 @@ xreap_agextent_binval(
*/
while (bno < agbno_next) {
struct xrep_bufscan scan = {
.daddr = XFS_AGB_TO_DADDR(mp, agno, bno),
.daddr = xfs_agbno_to_daddr(pag, bno),
.max_sectors = xrep_bufscan_max_sectors(mp,
agbno_next - bno),
.daddr_step = XFS_FSB_TO_BB(mp, 1),
@ -391,7 +390,7 @@ xreap_agextent_iter(
xfs_fsblock_t fsbno;
int error = 0;
fsbno = XFS_AGB_TO_FSB(sc->mp, sc->sa.pag->pag_agno, agbno);
fsbno = xfs_agbno_to_fsb(sc->sa.pag, agbno);
/*
* If there are other rmappings, this block is cross linked and must
@ -780,7 +779,6 @@ xreap_bmapi_binval(
xfs_fileoff_t off;
xfs_fileoff_t max_off;
xfs_extlen_t scan_blocks;
xfs_agnumber_t agno = sc->sa.pag->pag_agno;
xfs_agblock_t bno;
xfs_agblock_t agbno;
xfs_agblock_t agbno_next;
@ -837,7 +835,7 @@ xreap_bmapi_binval(
*/
while (bno < agbno_next) {
struct xrep_bufscan scan = {
.daddr = XFS_AGB_TO_DADDR(mp, agno, bno),
.daddr = xfs_agbno_to_daddr(pag, bno),
.max_sectors = xrep_bufscan_max_sectors(mp,
scan_blocks),
.daddr_step = XFS_FSB_TO_BB(mp, 1),


@ -590,7 +590,6 @@ xrep_refc_build_new_tree(
struct xfs_scrub *sc = rr->sc;
struct xfs_btree_cur *refc_cur;
struct xfs_perag *pag = sc->sa.pag;
xfs_fsblock_t fsbno;
int error;
error = xrep_refc_sort_records(rr);
@ -603,8 +602,8 @@ xrep_refc_build_new_tree(
* to root the new btree while it's under construction and before we
* attach it to the AG header.
*/
fsbno = XFS_AGB_TO_FSB(sc->mp, pag->pag_agno, xfs_refc_block(sc->mp));
xrep_newbt_init_ag(&rr->new_btree, sc, &XFS_RMAP_OINFO_REFC, fsbno,
xrep_newbt_init_ag(&rr->new_btree, sc, &XFS_RMAP_OINFO_REFC,
xfs_agbno_to_fsb(pag, xfs_refc_block(sc->mp)),
XFS_AG_RESV_METADATA);
rr->new_btree.bload.get_records = xrep_refc_get_records;
rr->new_btree.bload.claim_block = xrep_refc_claim_block;


@ -331,10 +331,8 @@ xrep_calc_ag_resblks(
freelen = aglen;
usedlen = aglen;
}
xfs_perag_put(pag);
trace_xrep_calc_ag_resblks(mp, sm->sm_agno, icount, aglen,
freelen, usedlen);
trace_xrep_calc_ag_resblks(pag, icount, aglen, freelen, usedlen);
/*
* Figure out how many blocks we'd need worst case to rebuild
@ -372,8 +370,9 @@ xrep_calc_ag_resblks(
rmapbt_sz = 0;
}
trace_xrep_calc_ag_resblks_btsize(mp, sm->sm_agno, bnobt_sz,
inobt_sz, rmapbt_sz, refcbt_sz);
trace_xrep_calc_ag_resblks_btsize(pag, bnobt_sz, inobt_sz, rmapbt_sz,
refcbt_sz);
xfs_perag_put(pag);
return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz));
}
@ -483,7 +482,7 @@ xrep_findroot_block(
int block_level;
int error = 0;
daddr = XFS_AGB_TO_DADDR(mp, ri->sc->sa.pag->pag_agno, agbno);
daddr = xfs_agbno_to_daddr(ri->sc->sa.pag, agbno);
/*
* Blocks in the AGFL have stale contents that might just happen to
@ -612,7 +611,7 @@ xrep_findroot_block(
else
fab->root = NULLAGBLOCK;
trace_xrep_findroot_block(mp, ri->sc->sa.pag->pag_agno, agbno,
trace_xrep_findroot_block(ri->sc->sa.pag, agbno,
be32_to_cpu(btblock->bb_magic), fab->height - 1);
out:
xfs_trans_brelse(ri->sc->tp, bp);


@ -231,7 +231,7 @@ xrep_rmap_stash(
if (xchk_iscan_aborted(&rr->iscan))
return -EFSCORRUPTED;
trace_xrep_rmap_found(sc->mp, sc->sa.pag->pag_agno, &rmap);
trace_xrep_rmap_found(sc->sa.pag, &rmap);
mutex_lock(&rr->lock);
mcur = xfs_rmapbt_mem_cursor(sc->sa.pag, sc->tp, &rr->rmap_btree);
@ -1272,7 +1272,6 @@ xrep_rmap_build_new_tree(
struct xfs_perag *pag = sc->sa.pag;
struct xfs_agf *agf = sc->sa.agf_bp->b_addr;
struct xfs_btree_cur *rmap_cur;
xfs_fsblock_t fsbno;
int error;
/*
@ -1290,9 +1289,9 @@ xrep_rmap_build_new_tree(
* rmapbt per-AG reservation, which we will adjust further after
* committing the new btree.
*/
fsbno = XFS_AGB_TO_FSB(sc->mp, pag->pag_agno, XFS_RMAP_BLOCK(sc->mp));
xrep_newbt_init_ag(&rr->new_btree, sc, &XFS_RMAP_OINFO_SKIP_UPDATE,
fsbno, XFS_AG_RESV_RMAPBT);
xfs_agbno_to_fsb(pag, XFS_RMAP_BLOCK(sc->mp)),
XFS_AG_RESV_RMAPBT);
rr->new_btree.bload.get_records = xrep_rmap_get_records;
rr->new_btree.bload.claim_block = xrep_rmap_claim_block;
rr->new_btree.alloc_vextent = xrep_rmap_alloc_vextent;
@ -1553,7 +1552,7 @@ xrep_rmapbt_live_update(
if (!xrep_rmapbt_want_live_update(&rr->iscan, &p->oinfo))
goto out_unlock;
trace_xrep_rmap_live_update(mp, rr->sc->sa.pag->pag_agno, action, p);
trace_xrep_rmap_live_update(rr->sc->sa.pag, action, p);
error = xrep_trans_alloc_hook_dummy(mp, &txcookie, &tp);
if (error)


@ -772,12 +772,12 @@ TRACE_EVENT(xchk_xref_error,
);
TRACE_EVENT(xchk_iallocbt_check_cluster,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agino_t startino, xfs_daddr_t map_daddr,
unsigned short map_len, unsigned int chunk_ino,
unsigned int nr_inodes, uint16_t cluster_mask,
uint16_t holemask, unsigned int cluster_ino),
TP_ARGS(mp, agno, startino, map_daddr, map_len, chunk_ino, nr_inodes,
TP_PROTO(const struct xfs_perag *pag, xfs_agino_t startino,
xfs_daddr_t map_daddr, unsigned short map_len,
unsigned int chunk_ino, unsigned int nr_inodes,
uint16_t cluster_mask, uint16_t holemask,
unsigned int cluster_ino),
TP_ARGS(pag, startino, map_daddr, map_len, chunk_ino, nr_inodes,
cluster_mask, holemask, cluster_ino),
TP_STRUCT__entry(
__field(dev_t, dev)
@ -792,8 +792,8 @@ TRACE_EVENT(xchk_iallocbt_check_cluster,
__field(uint16_t, holemask)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->startino = startino;
__entry->map_daddr = map_daddr;
__entry->map_len = map_len;
@ -922,7 +922,8 @@ DEFINE_XCHK_FSFREEZE_EVENT(xchk_fsfreeze);
DEFINE_XCHK_FSFREEZE_EVENT(xchk_fsthaw);
TRACE_EVENT(xchk_refcount_incorrect,
TP_PROTO(struct xfs_perag *pag, const struct xfs_refcount_irec *irec,
TP_PROTO(const struct xfs_perag *pag,
const struct xfs_refcount_irec *irec,
xfs_nlink_t seen),
TP_ARGS(pag, irec, seen),
TP_STRUCT__entry(
@ -1918,7 +1919,8 @@ TRACE_EVENT(xchk_dirtree_live_update,
#if IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR)
DECLARE_EVENT_CLASS(xrep_extent_class,
TP_PROTO(struct xfs_perag *pag, xfs_agblock_t agbno, xfs_extlen_t len),
TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno,
xfs_extlen_t len),
TP_ARGS(pag, agbno, len),
TP_STRUCT__entry(
__field(dev_t, dev)
@ -1940,7 +1942,8 @@ DECLARE_EVENT_CLASS(xrep_extent_class,
);
#define DEFINE_REPAIR_EXTENT_EVENT(name) \
DEFINE_EVENT(xrep_extent_class, name, \
TP_PROTO(struct xfs_perag *pag, xfs_agblock_t agbno, xfs_extlen_t len), \
TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno, \
xfs_extlen_t len), \
TP_ARGS(pag, agbno, len))
DEFINE_REPAIR_EXTENT_EVENT(xreap_dispose_unmap_extent);
DEFINE_REPAIR_EXTENT_EVENT(xreap_dispose_free_extent);
@ -1949,8 +1952,8 @@ DEFINE_REPAIR_EXTENT_EVENT(xreap_bmapi_binval);
DEFINE_REPAIR_EXTENT_EVENT(xrep_agfl_insert);
DECLARE_EVENT_CLASS(xrep_reap_find_class,
TP_PROTO(struct xfs_perag *pag, xfs_agblock_t agbno, xfs_extlen_t len,
bool crosslinked),
TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno,
xfs_extlen_t len, bool crosslinked),
TP_ARGS(pag, agbno, len, crosslinked),
TP_STRUCT__entry(
__field(dev_t, dev)
@ -1975,17 +1978,15 @@ DECLARE_EVENT_CLASS(xrep_reap_find_class,
);
#define DEFINE_REPAIR_REAP_FIND_EVENT(name) \
DEFINE_EVENT(xrep_reap_find_class, name, \
TP_PROTO(struct xfs_perag *pag, xfs_agblock_t agbno, xfs_extlen_t len, \
bool crosslinked), \
TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno, \
xfs_extlen_t len, bool crosslinked), \
TP_ARGS(pag, agbno, len, crosslinked))
DEFINE_REPAIR_REAP_FIND_EVENT(xreap_agextent_select);
DEFINE_REPAIR_REAP_FIND_EVENT(xreap_bmapi_select);
DECLARE_EVENT_CLASS(xrep_rmap_class,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agblock_t agbno, xfs_extlen_t len,
uint64_t owner, uint64_t offset, unsigned int flags),
TP_ARGS(mp, agno, agbno, len, owner, offset, flags),
TRACE_EVENT(xrep_ibt_walk_rmap,
TP_PROTO(const struct xfs_perag *pag, const struct xfs_rmap_irec *rec),
TP_ARGS(pag, rec),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@ -1996,13 +1997,13 @@ DECLARE_EVENT_CLASS(xrep_rmap_class,
__field(unsigned int, flags)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->agbno = agbno;
__entry->len = len;
__entry->owner = owner;
__entry->offset = offset;
__entry->flags = flags;
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->agbno = rec->rm_startblock;
__entry->len = rec->rm_blockcount;
__entry->owner = rec->rm_owner;
__entry->offset = rec->rm_offset;
__entry->flags = rec->rm_flags;
),
TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
@ -2013,19 +2014,11 @@ DECLARE_EVENT_CLASS(xrep_rmap_class,
__entry->offset,
__entry->flags)
);
#define DEFINE_REPAIR_RMAP_EVENT(name) \
DEFINE_EVENT(xrep_rmap_class, name, \
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
xfs_agblock_t agbno, xfs_extlen_t len, \
uint64_t owner, uint64_t offset, unsigned int flags), \
TP_ARGS(mp, agno, agbno, len, owner, offset, flags))
DEFINE_REPAIR_RMAP_EVENT(xrep_ibt_walk_rmap);
DEFINE_REPAIR_RMAP_EVENT(xrep_bmap_walk_rmap);
TRACE_EVENT(xrep_abt_found,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
TP_PROTO(const struct xfs_perag *pag,
const struct xfs_alloc_rec_incore *rec),
TP_ARGS(mp, agno, rec),
TP_ARGS(pag, rec),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@ -2033,8 +2026,8 @@ TRACE_EVENT(xrep_abt_found,
__field(xfs_extlen_t, blockcount)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->startblock = rec->ar_startblock;
__entry->blockcount = rec->ar_blockcount;
),
@ -2046,9 +2039,9 @@ TRACE_EVENT(xrep_abt_found,
)
TRACE_EVENT(xrep_ibt_found,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
TP_PROTO(const struct xfs_perag *pag,
const struct xfs_inobt_rec_incore *rec),
TP_ARGS(mp, agno, rec),
TP_ARGS(pag, rec),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@ -2059,8 +2052,8 @@ TRACE_EVENT(xrep_ibt_found,
__field(uint64_t, freemask)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->startino = rec->ir_startino;
__entry->holemask = rec->ir_holemask;
__entry->count = rec->ir_count;
@ -2078,7 +2071,8 @@ TRACE_EVENT(xrep_ibt_found,
)
TRACE_EVENT(xrep_refc_found,
TP_PROTO(struct xfs_perag *pag, const struct xfs_refcount_irec *rec),
TP_PROTO(const struct xfs_perag *pag,
const struct xfs_refcount_irec *rec),
TP_ARGS(pag, rec),
TP_STRUCT__entry(
__field(dev_t, dev)
@ -2138,9 +2132,8 @@ TRACE_EVENT(xrep_bmap_found,
);
TRACE_EVENT(xrep_rmap_found,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
const struct xfs_rmap_irec *rec),
TP_ARGS(mp, agno, rec),
TP_PROTO(const struct xfs_perag *pag, const struct xfs_rmap_irec *rec),
TP_ARGS(pag, rec),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@ -2151,8 +2144,8 @@ TRACE_EVENT(xrep_rmap_found,
__field(unsigned int, flags)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->agbno = rec->rm_startblock;
__entry->len = rec->rm_blockcount;
__entry->owner = rec->rm_owner;
@ -2170,9 +2163,9 @@ TRACE_EVENT(xrep_rmap_found,
);
TRACE_EVENT(xrep_findroot_block,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno,
uint32_t magic, uint16_t level),
TP_ARGS(mp, agno, agbno, magic, level),
TP_ARGS(pag, agbno, magic, level),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@ -2181,8 +2174,8 @@ TRACE_EVENT(xrep_findroot_block,
__field(uint16_t, level)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->agbno = agbno;
__entry->magic = magic;
__entry->level = level;
@ -2195,10 +2188,10 @@ TRACE_EVENT(xrep_findroot_block,
__entry->level)
)
TRACE_EVENT(xrep_calc_ag_resblks,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agino_t icount, xfs_agblock_t aglen, xfs_agblock_t freelen,
TP_PROTO(const struct xfs_perag *pag, xfs_agino_t icount,
xfs_agblock_t aglen, xfs_agblock_t freelen,
xfs_agblock_t usedlen),
TP_ARGS(mp, agno, icount, aglen, freelen, usedlen),
TP_ARGS(pag, icount, aglen, freelen, usedlen),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@ -2208,8 +2201,8 @@ TRACE_EVENT(xrep_calc_ag_resblks,
__field(xfs_agblock_t, usedlen)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->icount = icount;
__entry->aglen = aglen;
__entry->freelen = freelen;
@ -2224,10 +2217,10 @@ TRACE_EVENT(xrep_calc_ag_resblks,
__entry->usedlen)
)
TRACE_EVENT(xrep_calc_ag_resblks_btsize,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agblock_t bnobt_sz, xfs_agblock_t inobt_sz,
xfs_agblock_t rmapbt_sz, xfs_agblock_t refcbt_sz),
TP_ARGS(mp, agno, bnobt_sz, inobt_sz, rmapbt_sz, refcbt_sz),
TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t bnobt_sz,
xfs_agblock_t inobt_sz, xfs_agblock_t rmapbt_sz,
xfs_agblock_t refcbt_sz),
TP_ARGS(pag, bnobt_sz, inobt_sz, rmapbt_sz, refcbt_sz),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@ -2237,8 +2230,8 @@ TRACE_EVENT(xrep_calc_ag_resblks_btsize,
__field(xfs_agblock_t, refcbt_sz)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->bnobt_sz = bnobt_sz;
__entry->inobt_sz = inobt_sz;
__entry->rmapbt_sz = rmapbt_sz;
@ -2278,10 +2271,9 @@ TRACE_EVENT(xrep_reset_counters,
)
DECLARE_EVENT_CLASS(xrep_newbt_extent_class,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agblock_t agbno, xfs_extlen_t len,
int64_t owner),
TP_ARGS(mp, agno, agbno, len, owner),
TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno,
xfs_extlen_t len, int64_t owner),
TP_ARGS(pag, agbno, len, owner),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@ -2290,8 +2282,8 @@ DECLARE_EVENT_CLASS(xrep_newbt_extent_class,
__field(int64_t, owner)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->agbno = agbno;
__entry->len = len;
__entry->owner = owner;
@ -2305,10 +2297,9 @@ DECLARE_EVENT_CLASS(xrep_newbt_extent_class,
);
#define DEFINE_NEWBT_EXTENT_EVENT(name) \
DEFINE_EVENT(xrep_newbt_extent_class, name, \
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
xfs_agblock_t agbno, xfs_extlen_t len, \
int64_t owner), \
TP_ARGS(mp, agno, agbno, len, owner))
TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno, \
xfs_extlen_t len, int64_t owner), \
TP_ARGS(pag, agbno, len, owner))
DEFINE_NEWBT_EXTENT_EVENT(xrep_newbt_alloc_ag_blocks);
DEFINE_NEWBT_EXTENT_EVENT(xrep_newbt_alloc_file_blocks);
DEFINE_NEWBT_EXTENT_EVENT(xrep_newbt_free_blocks);
@ -2596,7 +2587,7 @@ TRACE_EVENT(xrep_cow_replace_mapping,
);
TRACE_EVENT(xrep_cow_free_staging,
TP_PROTO(struct xfs_perag *pag, xfs_agblock_t agbno,
TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno,
xfs_extlen_t blockcount),
TP_ARGS(pag, agbno, blockcount),
TP_STRUCT__entry(
@ -2652,9 +2643,9 @@ DEFINE_SCRUB_NLINKS_DIFF_EVENT(xrep_nlinks_update_inode);
DEFINE_SCRUB_NLINKS_DIFF_EVENT(xrep_nlinks_unfixable_inode);
TRACE_EVENT(xrep_rmap_live_update,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, unsigned int op,
TP_PROTO(const struct xfs_perag *pag, unsigned int op,
const struct xfs_rmap_update_params *p),
TP_ARGS(mp, agno, op, p),
TP_ARGS(pag, op, p),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@ -2666,8 +2657,8 @@ TRACE_EVENT(xrep_rmap_live_update,
__field(unsigned int, flags)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->op = op;
__entry->agbno = p->startblock;
__entry->len = p->blockcount;
@ -3313,7 +3304,7 @@ DEFINE_XREP_SYMLINK_EVENT(xrep_symlink_rebuild);
DEFINE_XREP_SYMLINK_EVENT(xrep_symlink_reset_fork);
TRACE_EVENT(xrep_iunlink_visit,
TP_PROTO(struct xfs_perag *pag, unsigned int bucket,
TP_PROTO(const struct xfs_perag *pag, unsigned int bucket,
xfs_agino_t bucket_agino, struct xfs_inode *ip),
TP_ARGS(pag, bucket, bucket_agino, ip),
TP_STRUCT__entry(
@ -3403,7 +3394,7 @@ TRACE_EVENT(xrep_iunlink_reload_ondisk,
);
TRACE_EVENT(xrep_iunlink_walk_ondisk_bucket,
TP_PROTO(struct xfs_perag *pag, unsigned int bucket,
TP_PROTO(const struct xfs_perag *pag, unsigned int bucket,
xfs_agino_t prev_agino, xfs_agino_t next_agino),
TP_ARGS(pag, bucket, prev_agino, next_agino),
TP_STRUCT__entry(
@ -3429,7 +3420,7 @@ TRACE_EVENT(xrep_iunlink_walk_ondisk_bucket,
);
DECLARE_EVENT_CLASS(xrep_iunlink_resolve_class,
TP_PROTO(struct xfs_perag *pag, unsigned int bucket,
TP_PROTO(const struct xfs_perag *pag, unsigned int bucket,
xfs_agino_t prev_agino, xfs_agino_t next_agino),
TP_ARGS(pag, bucket, prev_agino, next_agino),
TP_STRUCT__entry(
@ -3455,7 +3446,7 @@ DECLARE_EVENT_CLASS(xrep_iunlink_resolve_class,
);
#define DEFINE_REPAIR_IUNLINK_RESOLVE_EVENT(name) \
DEFINE_EVENT(xrep_iunlink_resolve_class, name, \
TP_PROTO(struct xfs_perag *pag, unsigned int bucket, \
TP_PROTO(const struct xfs_perag *pag, unsigned int bucket, \
xfs_agino_t prev_agino, xfs_agino_t next_agino), \
TP_ARGS(pag, bucket, prev_agino, next_agino))
DEFINE_REPAIR_IUNLINK_RESOLVE_EVENT(xrep_iunlink_resolve_uncached);
@ -3516,7 +3507,7 @@ TRACE_EVENT(xrep_iunlink_relink_prev,
);
TRACE_EVENT(xrep_iunlink_add_to_bucket,
TP_PROTO(struct xfs_perag *pag, unsigned int bucket,
TP_PROTO(const struct xfs_perag *pag, unsigned int bucket,
xfs_agino_t agino, xfs_agino_t curr_head),
TP_ARGS(pag, bucket, agino, curr_head),
TP_STRUCT__entry(
@ -3542,7 +3533,7 @@ TRACE_EVENT(xrep_iunlink_add_to_bucket,
);
TRACE_EVENT(xrep_iunlink_commit_bucket,
TP_PROTO(struct xfs_perag *pag, unsigned int bucket,
TP_PROTO(const struct xfs_perag *pag, unsigned int bucket,
xfs_agino_t old_agino, xfs_agino_t agino),
TP_ARGS(pag, bucket, old_agino, agino),
TP_STRUCT__entry(


@ -537,10 +537,14 @@ xfs_can_free_eofblocks(
return false;
/*
* Check if there is an post-EOF extent to free.
* Check if there is an post-EOF extent to free. If there are any
* delalloc blocks attached to the inode (data fork delalloc
* reservations or CoW extents of any kind), we need to free them so
* that inactivation doesn't fail to erase them.
*/
xfs_ilock(ip, XFS_ILOCK_SHARED);
if (xfs_iext_lookup_extent(ip, &ip->i_df, end_fsb, &icur, &imap))
if (ip->i_delayed_blks ||
xfs_iext_lookup_extent(ip, &ip->i_df, end_fsb, &icur, &imap))
found_blocks = true;
xfs_iunlock(ip, XFS_ILOCK_SHARED);
return found_blocks;


@ -724,9 +724,8 @@ xlog_recover_do_primary_sb_buffer(
}
/*
* Growfs can also grow the last existing AG. In this case we also need
* to update the length in the in-core perag structure and values
* depending on it.
* If the last AG was grown or shrunk, we also need to update the
* length in the in-core perag structure and values depending on it.
*/
error = xfs_update_last_ag_size(mp, orig_agcount);
if (error)


@ -81,7 +81,7 @@ xfs_discard_endio_work(
struct xfs_busy_extents *extents =
container_of(work, struct xfs_busy_extents, endio_work);
xfs_extent_busy_clear(extents->mount, &extents->extent_list, false);
xfs_extent_busy_clear(&extents->extent_list, false);
kfree(extents->owner);
}
@ -117,11 +117,10 @@ xfs_discard_extents(
blk_start_plug(&plug);
list_for_each_entry(busyp, &extents->extent_list, list) {
trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
busyp->length);
trace_xfs_discard_extent(busyp->pag, busyp->bno, busyp->length);
error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
xfs_agbno_to_daddr(busyp->pag, busyp->bno),
XFS_FSB_TO_BB(mp, busyp->length),
GFP_KERNEL, &bio);
if (error && error != -EOPNOTSUPP) {
@ -239,11 +238,11 @@ xfs_trim_gather_extents(
* overlapping ranges for now.
*/
if (fbno + flen < tcur->start) {
trace_xfs_discard_exclude(mp, pag->pag_agno, fbno, flen);
trace_xfs_discard_exclude(pag, fbno, flen);
goto next_extent;
}
if (fbno > tcur->end) {
trace_xfs_discard_exclude(mp, pag->pag_agno, fbno, flen);
trace_xfs_discard_exclude(pag, fbno, flen);
if (tcur->by_bno) {
tcur->count = 0;
break;
@ -261,7 +260,7 @@ xfs_trim_gather_extents(
/* Too small? Give up. */
if (flen < tcur->minlen) {
trace_xfs_discard_toosmall(mp, pag->pag_agno, fbno, flen);
trace_xfs_discard_toosmall(pag, fbno, flen);
if (tcur->by_bno)
goto next_extent;
tcur->count = 0;
@ -272,8 +271,8 @@ xfs_trim_gather_extents(
* If any blocks in the range are still busy, skip the
* discard and try again the next time.
*/
if (xfs_extent_busy_search(mp, pag, fbno, flen)) {
trace_xfs_discard_busy(mp, pag->pag_agno, fbno, flen);
if (xfs_extent_busy_search(pag, fbno, flen)) {
trace_xfs_discard_busy(pag, fbno, flen);
goto next_extent;
}
@ -301,7 +300,7 @@ next_extent:
* we aren't going to issue a discard on them any more.
*/
if (error)
xfs_extent_busy_clear(mp, &extents->extent_list, false);
xfs_extent_busy_clear(&extents->extent_list, false);
out_del_cursor:
xfs_btree_del_cursor(cur, error);
out_trans_cancel:
@ -347,7 +346,6 @@ xfs_trim_perag_extents(
break;
}
extents->mount = pag->pag_mount;
extents->owner = extents;
INIT_LIST_HEAD(&extents->extent_list);
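Both the tracepoint and the discard bio above now start from busyp->pag instead of a (mount, agno) pair, with xfs_agbno_to_daddr() standing in for XFS_AGB_TO_DADDR(). The helper's body is not part of this diff; a plausible shape, assuming it merely folds the perag's AG number back into the old mount-based conversion, would be:
/* Sketch under that assumption; not the patch's actual definition. */
static inline xfs_daddr_t
sketch_agbno_to_daddr(struct xfs_perag *pag, xfs_agblock_t agbno)
{
	return XFS_AGB_TO_DADDR(pag->pag_mount, pag->pag_agno, agbno);
}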

View File

@ -34,14 +34,14 @@ xfs_extent_busy_insert_list(
new = kzalloc(sizeof(struct xfs_extent_busy),
GFP_KERNEL | __GFP_NOFAIL);
new->agno = pag->pag_agno;
new->pag = xfs_perag_hold(pag);
new->bno = bno;
new->length = len;
INIT_LIST_HEAD(&new->list);
new->flags = flags;
/* trace before insert to be able to see failed inserts */
trace_xfs_extent_busy(pag->pag_mount, pag->pag_agno, bno, len);
trace_xfs_extent_busy(pag, bno, len);
spin_lock(&pag->pagb_lock);
rbp = &pag->pagb_tree.rb_node;
@ -101,7 +101,6 @@ xfs_extent_busy_insert_discard(
*/
int
xfs_extent_busy_search(
struct xfs_mount *mp,
struct xfs_perag *pag,
xfs_agblock_t bno,
xfs_extlen_t len)
@ -148,7 +147,6 @@ xfs_extent_busy_search(
*/
STATIC bool
xfs_extent_busy_update_extent(
struct xfs_mount *mp,
struct xfs_perag *pag,
struct xfs_extent_busy *busyp,
xfs_agblock_t fbno,
@ -280,24 +278,22 @@ xfs_extent_busy_update_extent(
ASSERT(0);
}
trace_xfs_extent_busy_reuse(mp, pag->pag_agno, fbno, flen);
trace_xfs_extent_busy_reuse(pag, fbno, flen);
return true;
out_force_log:
spin_unlock(&pag->pagb_lock);
xfs_log_force(mp, XFS_LOG_SYNC);
trace_xfs_extent_busy_force(mp, pag->pag_agno, fbno, flen);
xfs_log_force(pag->pag_mount, XFS_LOG_SYNC);
trace_xfs_extent_busy_force(pag, fbno, flen);
spin_lock(&pag->pagb_lock);
return false;
}
/*
* For a given extent [fbno, flen], make sure we can reuse it safely.
*/
void
xfs_extent_busy_reuse(
struct xfs_mount *mp,
struct xfs_perag *pag,
xfs_agblock_t fbno,
xfs_extlen_t flen,
@ -323,7 +319,7 @@ restart:
continue;
}
if (!xfs_extent_busy_update_extent(mp, pag, busyp, fbno, flen,
if (!xfs_extent_busy_update_extent(pag, busyp, fbno, flen,
userdata))
goto restart;
}
@ -500,8 +496,7 @@ xfs_extent_busy_trim(
out:
if (fbno != *bno || flen != *len) {
trace_xfs_extent_busy_trim(args->mp, args->agno, *bno, *len,
fbno, flen);
trace_xfs_extent_busy_trim(args->pag, *bno, *len, fbno, flen);
*bno = fbno;
*len = flen;
*busy_gen = args->pag->pagb_gen;
@ -530,12 +525,12 @@ xfs_extent_busy_clear_one(
busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
return false;
}
trace_xfs_extent_busy_clear(pag->pag_mount, busyp->agno,
busyp->bno, busyp->length);
trace_xfs_extent_busy_clear(pag, busyp->bno, busyp->length);
rb_erase(&busyp->rb_node, &pag->pagb_tree);
}
list_del_init(&busyp->list);
xfs_perag_put(busyp->pag);
kfree(busyp);
return true;
}
@ -547,7 +542,6 @@ xfs_extent_busy_clear_one(
*/
void
xfs_extent_busy_clear(
struct xfs_mount *mp,
struct list_head *list,
bool do_discard)
{
@ -558,10 +552,9 @@ xfs_extent_busy_clear(
return;
do {
struct xfs_perag *pag = xfs_perag_hold(busyp->pag);
bool wakeup = false;
struct xfs_perag *pag;
pag = xfs_perag_get(mp, busyp->agno);
spin_lock(&pag->pagb_lock);
do {
next = list_next_entry(busyp, list);
@ -569,7 +562,7 @@ xfs_extent_busy_clear(
wakeup = true;
busyp = next;
} while (!list_entry_is_head(busyp, list, list) &&
busyp->agno == pag->pag_agno);
busyp->pag == pag);
if (wakeup) {
pag->pagb_gen++;
@ -666,7 +659,7 @@ xfs_extent_busy_ag_cmp(
container_of(l2, struct xfs_extent_busy, list);
s32 diff;
diff = b1->agno - b2->agno;
diff = b1->pag->pag_agno - b2->pag->pag_agno;
if (!diff)
diff = b1->bno - b2->bno;
return diff;
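The structural change in this file is ownership: a busy extent now pins its AG with xfs_perag_hold() when it is inserted, so search, reuse, clear and the sort comparator above can all reach the perag (and through it the mount) without another lookup through the mount. Tearing an entry down therefore has to drop that reference; a condensed sketch of the release side, built from the calls visible in xfs_extent_busy_clear_one(), with the helper name being illustrative:
static void sketch_busy_extent_free(struct xfs_extent_busy *busyp)
{
	/* Drop the AG reference taken by xfs_perag_hold() at insert time. */
	list_del_init(&busyp->list);
	xfs_perag_put(busyp->pag);
	kfree(busyp);
}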

View File

@ -20,7 +20,7 @@ struct xfs_alloc_arg;
struct xfs_extent_busy {
struct rb_node rb_node; /* ag by-bno indexed search tree */
struct list_head list; /* transaction busy extent list */
xfs_agnumber_t agno;
struct xfs_perag *pag;
xfs_agblock_t bno;
xfs_extlen_t length;
unsigned int flags;
@ -33,7 +33,6 @@ struct xfs_extent_busy {
* to discard completion.
*/
struct xfs_busy_extents {
struct xfs_mount *mount;
struct list_head extent_list;
struct work_struct endio_work;
@ -54,16 +53,15 @@ xfs_extent_busy_insert_discard(struct xfs_perag *pag, xfs_agblock_t bno,
xfs_extlen_t len, struct list_head *busy_list);
void
xfs_extent_busy_clear(struct xfs_mount *mp, struct list_head *list,
bool do_discard);
xfs_extent_busy_clear(struct list_head *list, bool do_discard);
int
xfs_extent_busy_search(struct xfs_mount *mp, struct xfs_perag *pag,
xfs_agblock_t bno, xfs_extlen_t len);
xfs_extent_busy_search(struct xfs_perag *pag, xfs_agblock_t bno,
xfs_extlen_t len);
void
xfs_extent_busy_reuse(struct xfs_mount *mp, struct xfs_perag *pag,
xfs_agblock_t fbno, xfs_extlen_t flen, bool userdata);
xfs_extent_busy_reuse(struct xfs_perag *pag, xfs_agblock_t fbno,
xfs_extlen_t flen, bool userdata);
bool
xfs_extent_busy_trim(struct xfs_alloc_arg *args, xfs_agblock_t *bno,

View File

@ -547,8 +547,8 @@ xfs_agfl_free_finish_item(
error = xfs_alloc_read_agf(xefi->xefi_pag, tp, 0, &agbp);
if (!error)
error = xfs_free_ag_extent(tp, agbp, xefi->xefi_pag->pag_agno,
agbno, 1, &oinfo, XFS_AG_RESV_AGFL);
error = xfs_free_ag_extent(tp, agbp, agbno, 1, &oinfo,
XFS_AG_RESV_AGFL);
xfs_efd_add_extent(efdp, xefi);
xfs_extent_free_cancel_item(&xefi->xefi_list);

View File

@ -227,7 +227,7 @@ xfs_filestream_lookup_association(
trace_xfs_filestream_lookup(pag, ap->ip->i_ino);
ap->blkno = XFS_AGB_TO_FSB(args->mp, pag->pag_agno, 0);
ap->blkno = xfs_agbno_to_fsb(pag, 0);
xfs_bmap_adjacent(ap);
/*
@ -344,7 +344,6 @@ xfs_filestream_select_ag(
struct xfs_alloc_arg *args,
xfs_extlen_t *longest)
{
struct xfs_mount *mp = args->mp;
struct xfs_inode *pip;
xfs_ino_t ino = 0;
int error = 0;
@ -370,7 +369,7 @@ xfs_filestream_select_ag(
return error;
out_select:
ap->blkno = XFS_AGB_TO_FSB(mp, args->pag->pag_agno, 0);
ap->blkno = xfs_agbno_to_fsb(args->pag, 0);
return 0;
}
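As with the disk-address conversion in the discard hunk earlier, the filestream code now maps an AG block to a filesystem block directly from the perag. Assuming xfs_agbno_to_fsb() is a thin wrapper over the old macro, the equivalence is roughly:
/* Sketch of the assumed equivalence; not taken from this diff. */
static inline xfs_fsblock_t
sketch_agbno_to_fsb(struct xfs_perag *pag, xfs_agblock_t agbno)
{
	return XFS_AGB_TO_FSB(pag->pag_mount, pag->pag_agno, agbno);
}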

View File

@ -391,15 +391,11 @@ xfs_getfsmap_datadev_helper(
const struct xfs_rmap_irec *rec,
void *priv)
{
struct xfs_mount *mp = cur->bc_mp;
struct xfs_getfsmap_info *info = priv;
xfs_fsblock_t fsb;
xfs_daddr_t rec_daddr;
fsb = XFS_AGB_TO_FSB(mp, cur->bc_ag.pag->pag_agno, rec->rm_startblock);
rec_daddr = XFS_FSB_TO_DADDR(mp, fsb);
return xfs_getfsmap_helper(cur->bc_tp, info, rec, rec_daddr, 0);
return xfs_getfsmap_helper(cur->bc_tp, info, rec,
xfs_agbno_to_daddr(cur->bc_ag.pag, rec->rm_startblock),
0);
}
/* Transform a bnobt irec into a fsmap */
@ -409,13 +405,8 @@ xfs_getfsmap_datadev_bnobt_helper(
const struct xfs_alloc_rec_incore *rec,
void *priv)
{
struct xfs_mount *mp = cur->bc_mp;
struct xfs_getfsmap_info *info = priv;
struct xfs_rmap_irec irec;
xfs_daddr_t rec_daddr;
rec_daddr = XFS_AGB_TO_DADDR(mp, cur->bc_ag.pag->pag_agno,
rec->ar_startblock);
irec.rm_startblock = rec->ar_startblock;
irec.rm_blockcount = rec->ar_blockcount;
@ -423,7 +414,9 @@ xfs_getfsmap_datadev_bnobt_helper(
irec.rm_offset = 0;
irec.rm_flags = 0;
return xfs_getfsmap_helper(cur->bc_tp, info, &irec, rec_daddr, 0);
return xfs_getfsmap_helper(cur->bc_tp, info, &irec,
xfs_agbno_to_daddr(cur->bc_ag.pag, rec->ar_startblock),
0);
}
/* Set rmap flags based on the getfsmap flags */
@ -471,8 +464,7 @@ __xfs_getfsmap_datadev(
struct xfs_btree_cur *bt_cur = NULL;
xfs_fsblock_t start_fsb;
xfs_fsblock_t end_fsb;
xfs_agnumber_t start_ag;
xfs_agnumber_t end_ag;
xfs_agnumber_t start_ag, end_ag, ag;
uint64_t eofs;
int error = 0;
@ -520,7 +512,8 @@ __xfs_getfsmap_datadev(
start_ag = XFS_FSB_TO_AGNO(mp, start_fsb);
end_ag = XFS_FSB_TO_AGNO(mp, end_fsb);
for_each_perag_range(mp, start_ag, end_ag, pag) {
ag = start_ag;
for_each_perag_range(mp, ag, end_ag, pag) {
/*
* Set the AG high key from the fsmap high key if this
* is the last AG that we're querying.

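The walk now uses a scratch copy of the starting AG number. The hunk suggests why: for_each_perag_range() advances its agno argument as it iterates, and start_ag is presumably still consulted later in the function, so it must not be clobbered. The intended pattern, with the loop body elided:
xfs_agnumber_t	start_ag, end_ag, ag;

start_ag = XFS_FSB_TO_AGNO(mp, start_fsb);
end_ag = XFS_FSB_TO_AGNO(mp, end_fsb);

/* Iterate on a scratch cursor so start_ag survives the walk. */
ag = start_ag;
for_each_perag_range(mp, ag, end_ag, pag) {
	/* ... per-AG rmap or bnobt query, as in __xfs_getfsmap_datadev() ... */
}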
View File

@ -41,7 +41,7 @@ xfs_health_unmount(
for_each_perag(mp, agno, pag) {
xfs_ag_measure_sickness(pag, &sick, &checked);
if (sick) {
trace_xfs_ag_unfixed_corruption(mp, agno, sick);
trace_xfs_ag_unfixed_corruption(pag, sick);
warn = true;
}
}
@ -233,7 +233,7 @@ xfs_ag_mark_sick(
unsigned int mask)
{
ASSERT(!(mask & ~XFS_SICK_AG_ALL));
trace_xfs_ag_mark_sick(pag->pag_mount, pag->pag_agno, mask);
trace_xfs_ag_mark_sick(pag, mask);
spin_lock(&pag->pag_state_lock);
pag->pag_sick |= mask;
@ -247,7 +247,7 @@ xfs_ag_mark_corrupt(
unsigned int mask)
{
ASSERT(!(mask & ~XFS_SICK_AG_ALL));
trace_xfs_ag_mark_corrupt(pag->pag_mount, pag->pag_agno, mask);
trace_xfs_ag_mark_corrupt(pag, mask);
spin_lock(&pag->pag_state_lock);
pag->pag_sick |= mask;
@ -262,7 +262,7 @@ xfs_ag_mark_healthy(
unsigned int mask)
{
ASSERT(!(mask & ~XFS_SICK_AG_ALL));
trace_xfs_ag_mark_healthy(pag->pag_mount, pag->pag_agno, mask);
trace_xfs_ag_mark_healthy(pag, mask);
spin_lock(&pag->pag_state_lock);
pag->pag_sick &= ~mask;

View File

@ -1516,7 +1516,6 @@ xfs_iunlink_reload_next(
struct xfs_perag *pag = agibp->b_pag;
struct xfs_mount *mp = pag->pag_mount;
struct xfs_inode *next_ip = NULL;
xfs_ino_t ino;
int error;
ASSERT(next_agino != NULLAGINO);
@ -1538,8 +1537,8 @@ xfs_iunlink_reload_next(
* but we'd rather shut down now since we're already running in a weird
* situation.
*/
ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, next_agino);
error = xfs_iget(mp, tp, ino, XFS_IGET_UNTRUSTED, 0, &next_ip);
error = xfs_iget(mp, tp, xfs_agino_to_ino(pag, next_agino),
XFS_IGET_UNTRUSTED, 0, &next_ip);
if (error) {
xfs_ag_mark_sick(pag, XFS_SICK_AG_AGI);
return error;

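xfs_agino_to_ino() takes over from the XFS_AGINO_TO_INO() macro here, and again in the iwalk and log-recovery hunks further down. Its definition is outside this diff; a minimal sketch, assuming it only supplies the mount and AG number from the perag, would be:
/* Sketch of the assumed wrapper; not the patch's actual definition. */
static inline xfs_ino_t
sketch_agino_to_ino(struct xfs_perag *pag, xfs_agino_t agino)
{
	return XFS_AGINO_TO_INO(pag->pag_mount, pag->pag_agno, agino);
}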
View File

@ -52,14 +52,14 @@ xfs_iunlink_log_dinode(
struct xfs_trans *tp,
struct xfs_iunlink_item *iup)
{
struct xfs_mount *mp = tp->t_mountp;
struct xfs_inode *ip = iup->ip;
struct xfs_dinode *dip;
struct xfs_buf *ibp;
xfs_agino_t old_ptr;
int offset;
int error;
error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &ibp);
error = xfs_imap_to_bp(tp->t_mountp, tp, &ip->i_imap, &ibp);
if (error)
return error;
/*
@ -73,22 +73,21 @@ xfs_iunlink_log_dinode(
dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset);
/* Make sure the old pointer isn't garbage. */
if (be32_to_cpu(dip->di_next_unlinked) != iup->old_agino) {
old_ptr = be32_to_cpu(dip->di_next_unlinked);
if (old_ptr != iup->old_agino) {
xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
sizeof(*dip), __this_address);
error = -EFSCORRUPTED;
goto out;
}
trace_xfs_iunlink_update_dinode(mp, iup->pag->pag_agno,
XFS_INO_TO_AGINO(mp, ip->i_ino),
be32_to_cpu(dip->di_next_unlinked), iup->next_agino);
trace_xfs_iunlink_update_dinode(iup, old_ptr);
dip->di_next_unlinked = cpu_to_be32(iup->next_agino);
offset = ip->i_imap.im_boffset +
offsetof(struct xfs_dinode, di_next_unlinked);
xfs_dinode_calc_crc(mp, dip);
xfs_dinode_calc_crc(tp->t_mountp, dip);
xfs_trans_inode_buf(tp, ibp);
xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
return 0;

View File

@ -100,7 +100,6 @@ xfs_iwalk_ichunk_ra(
struct xfs_inobt_rec_incore *irec)
{
struct xfs_ino_geometry *igeo = M_IGEO(mp);
xfs_agnumber_t agno = pag->pag_agno;
xfs_agblock_t agbno;
struct blk_plug plug;
int i; /* inode chunk index */
@ -114,7 +113,7 @@ xfs_iwalk_ichunk_ra(
imask = xfs_inobt_maskn(i, igeo->inodes_per_cluster);
if (imask & ~irec->ir_free) {
xfs_buf_readahead(mp->m_ddev_targp,
XFS_AGB_TO_DADDR(mp, agno, agbno),
xfs_agbno_to_daddr(pag, agbno),
igeo->blocks_per_cluster * mp->m_bsize,
&xfs_inode_buf_ops);
}
@ -177,14 +176,13 @@ xfs_iwalk_ag_recs(
struct xfs_mount *mp = iwag->mp;
struct xfs_trans *tp = iwag->tp;
struct xfs_perag *pag = iwag->pag;
xfs_ino_t ino;
unsigned int i, j;
int error;
for (i = 0; i < iwag->nr_recs; i++) {
struct xfs_inobt_rec_incore *irec = &iwag->recs[i];
trace_xfs_iwalk_ag_rec(mp, pag->pag_agno, irec);
trace_xfs_iwalk_ag_rec(pag, irec);
if (xfs_pwork_want_abort(&iwag->pwork))
return 0;
@ -208,9 +206,10 @@ xfs_iwalk_ag_recs(
continue;
/* Otherwise call our function. */
ino = XFS_AGINO_TO_INO(mp, pag->pag_agno,
irec->ir_startino + j);
error = iwag->iwalk_fn(mp, tp, ino, iwag->data);
error = iwag->iwalk_fn(mp, tp,
xfs_agino_to_ino(pag,
irec->ir_startino + j),
iwag->data);
if (error)
return error;
}
@ -305,7 +304,7 @@ xfs_iwalk_ag_start(
return -EFSCORRUPTED;
}
iwag->lastino = XFS_AGINO_TO_INO(mp, pag->pag_agno,
iwag->lastino = xfs_agino_to_ino(pag,
irec->ir_startino + XFS_INODES_PER_CHUNK - 1);
/*
@ -425,7 +424,7 @@ xfs_iwalk_ag(
break;
/* Make sure that we always move forward. */
rec_fsino = XFS_AGINO_TO_INO(mp, pag->pag_agno, irec->ir_startino);
rec_fsino = xfs_agino_to_ino(pag, irec->ir_startino);
if (iwag->lastino != NULLFSINO &&
XFS_IS_CORRUPT(mp, iwag->lastino >= rec_fsino)) {
xfs_btree_mark_sick(cur);

View File

@ -907,7 +907,7 @@ xlog_cil_committed(
xlog_cil_ail_insert(ctx, abort);
xfs_extent_busy_sort(&ctx->busy_extents.extent_list);
xfs_extent_busy_clear(mp, &ctx->busy_extents.extent_list,
xfs_extent_busy_clear(&ctx->busy_extents.extent_list,
xfs_has_discard(mp) && !abort);
spin_lock(&ctx->cil->xc_push_lock);
@ -917,7 +917,6 @@ xlog_cil_committed(
xlog_cil_free_logvec(&ctx->lv_chain);
if (!list_empty(&ctx->busy_extents.extent_list)) {
ctx->busy_extents.mount = mp;
ctx->busy_extents.owner = ctx;
xfs_discard_extents(mp, &ctx->busy_extents);
return;

View File

@ -2726,9 +2726,8 @@ xlog_recover_iunlink_bucket(
agino = be32_to_cpu(agi->agi_unlinked[bucket]);
while (agino != NULLAGINO) {
error = xfs_iget(mp, NULL,
XFS_AGINO_TO_INO(mp, pag->pag_agno, agino),
0, 0, &ip);
error = xfs_iget(mp, NULL, xfs_agino_to_ino(pag, agino), 0, 0,
&ip);
if (error)
break;

View File

@ -32,6 +32,7 @@
#include "xfs_fsmap.h"
#include "xfs_btree_staging.h"
#include "xfs_icache.h"
#include "xfs_iunlink_item.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_error.h"

View File

@ -74,6 +74,7 @@ struct xfs_refcount_irec;
struct xfs_fsmap;
struct xfs_rmap_irec;
struct xfs_icreate_log;
struct xfs_iunlink_item;
struct xfs_owner_info;
struct xfs_trans_res;
struct xfs_inobt_rec_incore;
@ -181,7 +182,7 @@ TRACE_EVENT(xlog_intent_recovery_failed,
);
DECLARE_EVENT_CLASS(xfs_perag_class,
TP_PROTO(struct xfs_perag *pag, unsigned long caller_ip),
TP_PROTO(const struct xfs_perag *pag, unsigned long caller_ip),
TP_ARGS(pag, caller_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
@ -207,7 +208,7 @@ DECLARE_EVENT_CLASS(xfs_perag_class,
#define DEFINE_PERAG_REF_EVENT(name) \
DEFINE_EVENT(xfs_perag_class, name, \
TP_PROTO(struct xfs_perag *pag, unsigned long caller_ip), \
TP_PROTO(const struct xfs_perag *pag, unsigned long caller_ip), \
TP_ARGS(pag, caller_ip))
DEFINE_PERAG_REF_EVENT(xfs_perag_get);
DEFINE_PERAG_REF_EVENT(xfs_perag_hold);
@ -299,15 +300,15 @@ TRACE_EVENT(xfs_inodegc_shrinker_scan,
);
DECLARE_EVENT_CLASS(xfs_ag_class,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno),
TP_ARGS(mp, agno),
TP_PROTO(const struct xfs_perag *pag),
TP_ARGS(pag),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
),
TP_printk("dev %d:%d agno 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
@ -315,8 +316,8 @@ DECLARE_EVENT_CLASS(xfs_ag_class,
);
#define DEFINE_AG_EVENT(name) \
DEFINE_EVENT(xfs_ag_class, name, \
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno), \
TP_ARGS(mp, agno))
TP_PROTO(const struct xfs_perag *pag), \
TP_ARGS(pag))
DEFINE_AG_EVENT(xfs_read_agf);
DEFINE_AG_EVENT(xfs_alloc_read_agf);
@ -662,7 +663,7 @@ DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release);
DEFINE_BUF_ITEM_EVENT(xfs_trans_binval);
DECLARE_EVENT_CLASS(xfs_filestream_class,
TP_PROTO(struct xfs_perag *pag, xfs_ino_t ino),
TP_PROTO(const struct xfs_perag *pag, xfs_ino_t ino),
TP_ARGS(pag, ino),
TP_STRUCT__entry(
__field(dev_t, dev)
@ -684,14 +685,14 @@ DECLARE_EVENT_CLASS(xfs_filestream_class,
)
#define DEFINE_FILESTREAM_EVENT(name) \
DEFINE_EVENT(xfs_filestream_class, name, \
TP_PROTO(struct xfs_perag *pag, xfs_ino_t ino), \
TP_PROTO(const struct xfs_perag *pag, xfs_ino_t ino), \
TP_ARGS(pag, ino))
DEFINE_FILESTREAM_EVENT(xfs_filestream_free);
DEFINE_FILESTREAM_EVENT(xfs_filestream_lookup);
DEFINE_FILESTREAM_EVENT(xfs_filestream_scan);
TRACE_EVENT(xfs_filestream_pick,
TP_PROTO(struct xfs_perag *pag, xfs_ino_t ino),
TP_PROTO(const struct xfs_perag *pag, xfs_ino_t ino),
TP_ARGS(pag, ino),
TP_STRUCT__entry(
__field(dev_t, dev)
@ -898,9 +899,10 @@ TRACE_EVENT(xfs_iomap_prealloc_size,
)
TRACE_EVENT(xfs_irec_merge_pre,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t agino,
uint16_t holemask, xfs_agino_t nagino, uint16_t nholemask),
TP_ARGS(mp, agno, agino, holemask, nagino, nholemask),
TP_PROTO(const struct xfs_perag *pag,
const struct xfs_inobt_rec_incore *rec,
const struct xfs_inobt_rec_incore *nrec),
TP_ARGS(pag, rec, nrec),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@ -910,12 +912,12 @@ TRACE_EVENT(xfs_irec_merge_pre,
__field(uint16_t, nholemask)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->agino = agino;
__entry->holemask = holemask;
__entry->nagino = nagino;
__entry->nholemask = holemask;
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->agino = rec->ir_startino;
__entry->holemask = rec->ir_holemask;
__entry->nagino = nrec->ir_startino;
__entry->nholemask = nrec->ir_holemask;
),
TP_printk("dev %d:%d agno 0x%x agino 0x%x holemask 0x%x new_agino 0x%x new_holemask 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
@ -927,9 +929,9 @@ TRACE_EVENT(xfs_irec_merge_pre,
)
TRACE_EVENT(xfs_irec_merge_post,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t agino,
uint16_t holemask),
TP_ARGS(mp, agno, agino, holemask),
TP_PROTO(const struct xfs_perag *pag,
const struct xfs_inobt_rec_incore *nrec),
TP_ARGS(pag, nrec),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@ -937,10 +939,10 @@ TRACE_EVENT(xfs_irec_merge_post,
__field(uint16_t, holemask)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->agino = agino;
__entry->holemask = holemask;
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->agino = nrec->ir_startino;
__entry->holemask = nrec->ir_holemask;
),
TP_printk("dev %d:%d agno 0x%x agino 0x%x holemask 0x%x",
MAJOR(__entry->dev),
@ -1638,9 +1640,9 @@ TRACE_EVENT(xfs_bunmap,
);
DECLARE_EVENT_CLASS(xfs_extent_busy_class,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agblock_t agbno, xfs_extlen_t len),
TP_ARGS(mp, agno, agbno, len),
TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno,
xfs_extlen_t len),
TP_ARGS(pag, agbno, len),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@ -1648,8 +1650,8 @@ DECLARE_EVENT_CLASS(xfs_extent_busy_class,
__field(xfs_extlen_t, len)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->agbno = agbno;
__entry->len = len;
),
@ -1661,19 +1663,18 @@ DECLARE_EVENT_CLASS(xfs_extent_busy_class,
);
#define DEFINE_BUSY_EVENT(name) \
DEFINE_EVENT(xfs_extent_busy_class, name, \
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
xfs_agblock_t agbno, xfs_extlen_t len), \
TP_ARGS(mp, agno, agbno, len))
TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno, \
xfs_extlen_t len), \
TP_ARGS(pag, agbno, len))
DEFINE_BUSY_EVENT(xfs_extent_busy);
DEFINE_BUSY_EVENT(xfs_extent_busy_force);
DEFINE_BUSY_EVENT(xfs_extent_busy_reuse);
DEFINE_BUSY_EVENT(xfs_extent_busy_clear);
TRACE_EVENT(xfs_extent_busy_trim,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agblock_t agbno, xfs_extlen_t len,
xfs_agblock_t tbno, xfs_extlen_t tlen),
TP_ARGS(mp, agno, agbno, len, tbno, tlen),
TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno,
xfs_extlen_t len, xfs_agblock_t tbno, xfs_extlen_t tlen),
TP_ARGS(pag, agbno, len, tbno, tlen),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@ -1683,8 +1684,8 @@ TRACE_EVENT(xfs_extent_busy_trim,
__field(xfs_extlen_t, tlen)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->agbno = agbno;
__entry->len = len;
__entry->tbno = tbno;
@ -1762,10 +1763,10 @@ DEFINE_AGF_EVENT(xfs_agf);
DEFINE_AGF_EVENT(xfs_agfl_reset);
TRACE_EVENT(xfs_free_extent,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno,
xfs_extlen_t len, enum xfs_ag_resv_type resv, int haveleft,
int haveright),
TP_ARGS(mp, agno, agbno, len, resv, haveleft, haveright),
TP_ARGS(pag, agbno, len, resv, haveleft, haveright),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@ -1776,8 +1777,8 @@ TRACE_EVENT(xfs_free_extent,
__field(int, haveright)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->agbno = agbno;
__entry->len = len;
__entry->resv = resv;
@ -2430,9 +2431,9 @@ DEFINE_LOG_RECOVER_ICREATE_ITEM(xfs_log_recover_icreate_cancel);
DEFINE_LOG_RECOVER_ICREATE_ITEM(xfs_log_recover_icreate_recover);
DECLARE_EVENT_CLASS(xfs_discard_class,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agblock_t agbno, xfs_extlen_t len),
TP_ARGS(mp, agno, agbno, len),
TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno,
xfs_extlen_t len),
TP_ARGS(pag, agbno, len),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@ -2440,8 +2441,8 @@ DECLARE_EVENT_CLASS(xfs_discard_class,
__field(xfs_extlen_t, len)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->agbno = agbno;
__entry->len = len;
),
@ -2454,9 +2455,9 @@ DECLARE_EVENT_CLASS(xfs_discard_class,
#define DEFINE_DISCARD_EVENT(name) \
DEFINE_EVENT(xfs_discard_class, name, \
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
xfs_agblock_t agbno, xfs_extlen_t len), \
TP_ARGS(mp, agno, agbno, len))
TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno, \
xfs_extlen_t len), \
TP_ARGS(pag, agbno, len))
DEFINE_DISCARD_EVENT(xfs_discard_extent);
DEFINE_DISCARD_EVENT(xfs_discard_toosmall);
DEFINE_DISCARD_EVENT(xfs_discard_exclude);
@ -3143,11 +3144,10 @@ DEFINE_AG_RESV_EVENT(xfs_ag_resv_free_extent);
DEFINE_AG_RESV_EVENT(xfs_ag_resv_critical);
DEFINE_AG_RESV_EVENT(xfs_ag_resv_needed);
/* simple AG-based error/%ip tracepoint class */
DECLARE_EVENT_CLASS(xfs_ag_error_class,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int error,
TRACE_EVENT(xfs_ag_resv_init_error,
TP_PROTO(const struct xfs_perag *pag, int error,
unsigned long caller_ip),
TP_ARGS(mp, agno, error, caller_ip),
TP_ARGS(pag, error, caller_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@ -3155,8 +3155,8 @@ DECLARE_EVENT_CLASS(xfs_ag_error_class,
__field(unsigned long, caller_ip)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->error = error;
__entry->caller_ip = caller_ip;
),
@ -3167,13 +3167,6 @@ DECLARE_EVENT_CLASS(xfs_ag_error_class,
(char *)__entry->caller_ip)
);
#define DEFINE_AG_ERROR_EVENT(name) \
DEFINE_EVENT(xfs_ag_error_class, name, \
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int error, \
unsigned long caller_ip), \
TP_ARGS(mp, agno, error, caller_ip))
DEFINE_AG_ERROR_EVENT(xfs_ag_resv_init_error);
/* refcount tracepoint classes */
DECLARE_EVENT_CLASS(xfs_refcount_class,
@ -4040,9 +4033,9 @@ DEFINE_TRANS_EVENT(xfs_trans_commit_items);
DEFINE_TRANS_EVENT(xfs_trans_free_items);
TRACE_EVENT(xfs_iunlink_update_bucket,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, unsigned int bucket,
TP_PROTO(const struct xfs_perag *pag, unsigned int bucket,
xfs_agino_t old_ptr, xfs_agino_t new_ptr),
TP_ARGS(mp, agno, bucket, old_ptr, new_ptr),
TP_ARGS(pag, bucket, old_ptr, new_ptr),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@ -4051,8 +4044,8 @@ TRACE_EVENT(xfs_iunlink_update_bucket,
__field(xfs_agino_t, new_ptr)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->bucket = bucket;
__entry->old_ptr = old_ptr;
__entry->new_ptr = new_ptr;
@ -4066,9 +4059,8 @@ TRACE_EVENT(xfs_iunlink_update_bucket,
);
TRACE_EVENT(xfs_iunlink_update_dinode,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t agino,
xfs_agino_t old_ptr, xfs_agino_t new_ptr),
TP_ARGS(mp, agno, agino, old_ptr, new_ptr),
TP_PROTO(const struct xfs_iunlink_item *iup, xfs_agino_t old_ptr),
TP_ARGS(iup, old_ptr),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@ -4077,11 +4069,12 @@ TRACE_EVENT(xfs_iunlink_update_dinode,
__field(xfs_agino_t, new_ptr)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->agino = agino;
__entry->dev = iup->pag->pag_mount->m_super->s_dev;
__entry->agno = iup->pag->pag_agno;
__entry->agino =
XFS_INO_TO_AGINO(iup->ip->i_mount, iup->ip->i_ino);
__entry->old_ptr = old_ptr;
__entry->new_ptr = new_ptr;
__entry->new_ptr = iup->next_agino;
),
TP_printk("dev %d:%d agno 0x%x agino 0x%x old 0x%x new 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
@ -4190,16 +4183,16 @@ DEFINE_FS_CORRUPT_EVENT(xfs_rt_mark_healthy);
DEFINE_FS_CORRUPT_EVENT(xfs_rt_unfixed_corruption);
DECLARE_EVENT_CLASS(xfs_ag_corrupt_class,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, unsigned int flags),
TP_ARGS(mp, agno, flags),
TP_PROTO(const struct xfs_perag *pag, unsigned int flags),
TP_ARGS(pag, flags),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(unsigned int, flags)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->flags = flags;
),
TP_printk("dev %d:%d agno 0x%x flags 0x%x",
@ -4208,9 +4201,8 @@ DECLARE_EVENT_CLASS(xfs_ag_corrupt_class,
);
#define DEFINE_AG_CORRUPT_EVENT(name) \
DEFINE_EVENT(xfs_ag_corrupt_class, name, \
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
unsigned int flags), \
TP_ARGS(mp, agno, flags))
TP_PROTO(const struct xfs_perag *pag, unsigned int flags), \
TP_ARGS(pag, flags))
DEFINE_AG_CORRUPT_EVENT(xfs_ag_mark_sick);
DEFINE_AG_CORRUPT_EVENT(xfs_ag_mark_corrupt);
DEFINE_AG_CORRUPT_EVENT(xfs_ag_mark_healthy);
@ -4242,29 +4234,10 @@ DEFINE_INODE_CORRUPT_EVENT(xfs_inode_mark_corrupt);
DEFINE_INODE_CORRUPT_EVENT(xfs_inode_mark_healthy);
DEFINE_INODE_CORRUPT_EVENT(xfs_inode_unfixed_corruption);
TRACE_EVENT(xfs_iwalk_ag,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agino_t startino),
TP_ARGS(mp, agno, startino),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(xfs_agino_t, startino)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->startino = startino;
),
TP_printk("dev %d:%d agno 0x%x startino 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno,
__entry->startino)
)
TRACE_EVENT(xfs_iwalk_ag_rec,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
TP_PROTO(const struct xfs_perag *pag, \
struct xfs_inobt_rec_incore *irec),
TP_ARGS(mp, agno, irec),
TP_ARGS(pag, irec),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
@ -4272,8 +4245,8 @@ TRACE_EVENT(xfs_iwalk_ag_rec,
__field(uint64_t, freemask)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->startino = irec->ir_startino;
__entry->freemask = irec->ir_free;
),
@ -4676,7 +4649,7 @@ TRACE_EVENT(xfs_force_shutdown,
#ifdef CONFIG_XFS_DRAIN_INTENTS
DECLARE_EVENT_CLASS(xfs_perag_intents_class,
TP_PROTO(struct xfs_perag *pag, void *caller_ip),
TP_PROTO(const struct xfs_perag *pag, void *caller_ip),
TP_ARGS(pag, caller_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
@ -4699,7 +4672,7 @@ DECLARE_EVENT_CLASS(xfs_perag_intents_class,
#define DEFINE_PERAG_INTENTS_EVENT(name) \
DEFINE_EVENT(xfs_perag_intents_class, name, \
TP_PROTO(struct xfs_perag *pag, void *caller_ip), \
TP_PROTO(const struct xfs_perag *pag, void *caller_ip), \
TP_ARGS(pag, caller_ip))
DEFINE_PERAG_INTENTS_EVENT(xfs_perag_intent_hold);
DEFINE_PERAG_INTENTS_EVENT(xfs_perag_intent_rele);
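Across all of these event classes the conversion follows one pattern: TP_fast_assign() derives dev and agno from the perag, so call sites shrink from a (mount, agno) pair to the perag alone. Taking one of the xfs_ag_class events defined above as an illustration (the call sites themselves are not part of this hunk):
/* before: the caller supplied the mount and AG number separately */
trace_xfs_read_agf(pag->pag_mount, pag->pag_agno);
/* after: the event pulls both out of the perag */
trace_xfs_read_agf(pag);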

View File

@ -67,7 +67,7 @@ xfs_trans_free(
struct xfs_trans *tp)
{
xfs_extent_busy_sort(&tp->t_busy);
xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
xfs_extent_busy_clear(&tp->t_busy, false);
trace_xfs_trans_free(tp, _RET_IP_);
xfs_trans_clear_context(tp);