Merge tag 'generic-groups-6.13_2024-11-05' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux into staging-merge

xfs: create a generic allocation group structure [v5.5 02/10]

Soon we'll be sharding the realtime volume into separate allocation
groups.  These rt groups will /mostly/ behave the same as the ones on
the data device, but since rt groups don't have quite the same set of
struct fields as perags, let's hoist the parts that will be shared by
both into a common xfs_group object.

With a bit of luck, this should all go splendidly.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Committed by Carlos Maiolino on 2024-11-12 10:58:27 +01:00
commit 28cf0d1a34
70 changed files with 1380 additions and 1050 deletions
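
The pattern at the heart of this series, condensed from the diffs below (field lists abbreviated): the reference counts and other shared state move out of struct xfs_perag into a generic struct xfs_group, which each group type embeds, with trivial helpers converting between the two views.

/* Shared by all allocation group types (data AGs now, rt groups later). */
struct xfs_group {
        struct xfs_mount        *xg_mount;
        uint32_t                xg_gno;
        enum xfs_group_type     xg_type;
        atomic_t                xg_ref;         /* passive reference count */
        atomic_t                xg_active_ref;  /* active reference count */
        /* ... */
};

/* A data-device AG is now the generic group plus AG-specific state... */
struct xfs_perag {
        struct xfs_group        pag_group;
        unsigned long           pag_opstate;
        /* ... */
};

/* ...and container_of() converts between the two views. */
static inline struct xfs_perag *to_perag(struct xfs_group *xg)
{
        return container_of(xg, struct xfs_perag, pag_group);
}

static inline struct xfs_group *pag_group(struct xfs_perag *pag)
{
        return &pag->pag_group;
}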

fs/xfs/Makefile

@ -14,6 +14,7 @@ xfs-y += xfs_trace.o
# build the libxfs code first
xfs-y += $(addprefix libxfs/, \
xfs_group.o \
xfs_ag.o \
xfs_alloc.o \
xfs_alloc_btree.o \

fs/xfs/libxfs/xfs_ag.c

@ -30,85 +30,7 @@
#include "xfs_trace.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
/*
* Passive reference counting access wrappers to the perag structures. If the
* per-ag structure is to be freed, the freeing code is responsible for cleaning
* up objects with passive references before freeing the structure. This is
* things like cached buffers.
*/
struct xfs_perag *
xfs_perag_get(
struct xfs_mount *mp,
xfs_agnumber_t agno)
{
struct xfs_perag *pag;
rcu_read_lock();
pag = xa_load(&mp->m_perags, agno);
if (pag) {
trace_xfs_perag_get(pag, _RET_IP_);
ASSERT(atomic_read(&pag->pag_ref) >= 0);
atomic_inc(&pag->pag_ref);
}
rcu_read_unlock();
return pag;
}
/* Get a passive reference to the given perag. */
struct xfs_perag *
xfs_perag_hold(
struct xfs_perag *pag)
{
ASSERT(atomic_read(&pag->pag_ref) > 0 ||
atomic_read(&pag->pag_active_ref) > 0);
trace_xfs_perag_hold(pag, _RET_IP_);
atomic_inc(&pag->pag_ref);
return pag;
}
void
xfs_perag_put(
struct xfs_perag *pag)
{
trace_xfs_perag_put(pag, _RET_IP_);
ASSERT(atomic_read(&pag->pag_ref) > 0);
atomic_dec(&pag->pag_ref);
}
/*
* Active references for perag structures. This is for short term access to the
* per ag structures for walking trees or accessing state. If an AG is being
* shrunk or is offline, then this will fail to find that AG and return NULL
* instead.
*/
struct xfs_perag *
xfs_perag_grab(
struct xfs_mount *mp,
xfs_agnumber_t agno)
{
struct xfs_perag *pag;
rcu_read_lock();
pag = xa_load(&mp->m_perags, agno);
if (pag) {
trace_xfs_perag_grab(pag, _RET_IP_);
if (!atomic_inc_not_zero(&pag->pag_active_ref))
pag = NULL;
}
rcu_read_unlock();
return pag;
}
void
xfs_perag_rele(
struct xfs_perag *pag)
{
trace_xfs_perag_rele(pag, _RET_IP_);
atomic_dec(&pag->pag_active_ref);
}
#include "xfs_group.h"
/*
* xfs_initialize_perag_data
@ -183,6 +105,18 @@ out:
return error;
}
static void
xfs_perag_uninit(
struct xfs_group *xg)
{
#ifdef __KERNEL__
struct xfs_perag *pag = to_perag(xg);
cancel_delayed_work_sync(&pag->pag_blockgc_work);
xfs_buf_cache_destroy(&pag->pag_bcache);
#endif
}
/*
* Free up the per-ag resources within the specified AG range.
*/
@ -195,22 +129,8 @@ xfs_free_perag_range(
{
xfs_agnumber_t agno;
for (agno = first_agno; agno < end_agno; agno++) {
struct xfs_perag *pag = xa_erase(&mp->m_perags, agno);
ASSERT(pag);
XFS_IS_CORRUPT(pag->pag_mount, atomic_read(&pag->pag_ref) != 0);
xfs_defer_drain_free(&pag->pag_intents_drain);
cancel_delayed_work_sync(&pag->pag_blockgc_work);
xfs_buf_cache_destroy(&pag->pag_bcache);
/* drop the mount's active reference */
xfs_perag_rele(pag);
XFS_IS_CORRUPT(pag->pag_mount,
atomic_read(&pag->pag_active_ref) != 0);
kfree_rcu_mightsleep(pag);
}
for (agno = first_agno; agno < end_agno; agno++)
xfs_group_free(mp, agno, XG_TYPE_AG, xfs_perag_uninit);
}
/* Find the size of the AG, in blocks. */
@ -310,19 +230,13 @@ xfs_perag_alloc(
#ifdef __KERNEL__
/* Place kernel structure only init below this point. */
spin_lock_init(&pag->pag_ici_lock);
spin_lock_init(&pag->pagb_lock);
spin_lock_init(&pag->pag_state_lock);
INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
xfs_defer_drain_init(&pag->pag_intents_drain);
init_waitqueue_head(&pag->pagb_wait);
pag->pagb_tree = RB_ROOT;
xfs_hooks_init(&pag->pag_rmap_update_hooks);
#endif /* __KERNEL__ */
error = xfs_buf_cache_init(&pag->pag_bcache);
if (error)
goto out_defer_drain_free;
goto out_free_perag;
/*
* Pre-calculated geometry
@ -332,23 +246,15 @@ xfs_perag_alloc(
__xfs_agino_range(mp, pag->block_count, &pag->agino_min,
&pag->agino_max);
pag->pag_agno = index;
pag->pag_mount = mp;
/* Active ref owned by mount indicates AG is online. */
atomic_set(&pag->pag_active_ref, 1);
error = xa_insert(&mp->m_perags, index, pag, GFP_KERNEL);
if (error) {
WARN_ON_ONCE(error == -EBUSY);
error = xfs_group_insert(mp, pag_group(pag), index, XG_TYPE_AG);
if (error)
goto out_buf_cache_destroy;
}
return 0;
out_buf_cache_destroy:
xfs_buf_cache_destroy(&pag->pag_bcache);
out_defer_drain_free:
xfs_defer_drain_free(&pag->pag_intents_drain);
out_free_perag:
kfree(pag);
return error;
}
@ -833,7 +739,7 @@ xfs_ag_shrink_space(
struct xfs_trans **tpp,
xfs_extlen_t delta)
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
struct xfs_alloc_arg args = {
.tp = *tpp,
.mp = mp,
@ -850,7 +756,7 @@ xfs_ag_shrink_space(
xfs_agblock_t aglen;
int error, err2;
ASSERT(pag->pag_agno == mp->m_sb.sb_agcount - 1);
ASSERT(pag_agno(pag) == mp->m_sb.sb_agcount - 1);
error = xfs_ialloc_read_agi(pag, *tpp, 0, &agibp);
if (error)
return error;
@ -947,8 +853,8 @@ xfs_ag_shrink_space(
/* Update perag geometry */
pag->block_count -= delta;
__xfs_agino_range(pag->pag_mount, pag->block_count, &pag->agino_min,
&pag->agino_max);
__xfs_agino_range(mp, pag->block_count, &pag->agino_min,
&pag->agino_max);
xfs_ialloc_log_agi(*tpp, agibp, XFS_AGI_LENGTH);
xfs_alloc_log_agf(*tpp, agfbp, XFS_AGF_LENGTH);
@ -973,12 +879,13 @@ xfs_ag_extend_space(
struct xfs_trans *tp,
xfs_extlen_t len)
{
struct xfs_mount *mp = pag_mount(pag);
struct xfs_buf *bp;
struct xfs_agi *agi;
struct xfs_agf *agf;
int error;
ASSERT(pag->pag_agno == pag->pag_mount->m_sb.sb_agcount - 1);
ASSERT(pag_agno(pag) == mp->m_sb.sb_agcount - 1);
error = xfs_ialloc_read_agi(pag, tp, 0, &bp);
if (error)
@ -1018,8 +925,8 @@ xfs_ag_extend_space(
/* Update perag geometry */
pag->block_count = be32_to_cpu(agf->agf_length);
__xfs_agino_range(pag->pag_mount, pag->block_count, &pag->agino_min,
&pag->agino_max);
__xfs_agino_range(mp, pag->block_count, &pag->agino_min,
&pag->agino_max);
return 0;
}
@ -1046,7 +953,7 @@ xfs_ag_get_geometry(
/* Fill out form. */
memset(ageo, 0, sizeof(*ageo));
ageo->ag_number = pag->pag_agno;
ageo->ag_number = pag_agno(pag);
agi = agi_bp->b_addr;
ageo->ag_icount = be32_to_cpu(agi->agi_count);

fs/xfs/libxfs/xfs_ag.h

@ -7,6 +7,8 @@
#ifndef __LIBXFS_AG_H
#define __LIBXFS_AG_H 1
#include "xfs_group.h"
struct xfs_mount;
struct xfs_trans;
struct xfs_perag;
@ -30,10 +32,7 @@ struct xfs_ag_resv {
* performance of allocation group selection.
*/
struct xfs_perag {
struct xfs_mount *pag_mount; /* owner filesystem */
xfs_agnumber_t pag_agno; /* AG this structure belongs to */
atomic_t pag_ref; /* passive reference count */
atomic_t pag_active_ref; /* active reference count */
struct xfs_group pag_group;
unsigned long pag_opstate;
uint8_t pagf_bno_level; /* # of levels in bno btree */
uint8_t pagf_cnt_level; /* # of levels in cnt btree */
@ -70,13 +69,6 @@ struct xfs_perag {
#ifdef __KERNEL__
/* -- kernel only structures below this line -- */
/*
* Bitsets of per-ag metadata that have been checked and/or are sick.
* Callers should hold pag_state_lock before accessing this field.
*/
uint16_t pag_checked;
uint16_t pag_sick;
#ifdef CONFIG_XFS_ONLINE_REPAIR
/*
* Alternate btree heights so that online repair won't trip the write
@ -88,13 +80,6 @@ struct xfs_perag {
uint8_t pagf_repair_rmap_level;
#endif
spinlock_t pag_state_lock;
spinlock_t pagb_lock; /* lock for pagb_tree */
struct rb_root pagb_tree; /* ordered tree of busy extents */
unsigned int pagb_gen; /* generation count for pagb_tree */
wait_queue_head_t pagb_wait; /* woken when pagb_gen changes */
atomic_t pagf_fstrms; /* # of filestreams active in this AG */
spinlock_t pag_ici_lock; /* incore inode cache lock */
@ -106,21 +91,29 @@ struct xfs_perag {
/* background prealloc block trimming */
struct delayed_work pag_blockgc_work;
/*
* We use xfs_drain to track the number of deferred log intent items
* that have been queued (but not yet processed) so that waiters (e.g.
* scrub) will not lock resources when other threads are in the middle
* of processing a chain of intent items only to find momentary
* inconsistencies.
*/
struct xfs_defer_drain pag_intents_drain;
/* Hook to feed rmapbt updates to an active online repair. */
struct xfs_hooks pag_rmap_update_hooks;
#endif /* __KERNEL__ */
};
static inline struct xfs_perag *to_perag(struct xfs_group *xg)
{
return container_of(xg, struct xfs_perag, pag_group);
}
static inline struct xfs_group *pag_group(struct xfs_perag *pag)
{
return &pag->pag_group;
}
static inline struct xfs_mount *pag_mount(const struct xfs_perag *pag)
{
return pag->pag_group.xg_mount;
}
static inline xfs_agnumber_t pag_agno(const struct xfs_perag *pag)
{
return pag->pag_group.xg_gno;
}
/*
* Per-AG operational state. These are atomic flag bits.
*/
@ -151,13 +144,71 @@ int xfs_initialize_perag_data(struct xfs_mount *mp, xfs_agnumber_t agno);
int xfs_update_last_ag_size(struct xfs_mount *mp, xfs_agnumber_t prev_agcount);
/* Passive AG references */
struct xfs_perag *xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno);
struct xfs_perag *xfs_perag_hold(struct xfs_perag *pag);
void xfs_perag_put(struct xfs_perag *pag);
static inline struct xfs_perag *
xfs_perag_get(
struct xfs_mount *mp,
xfs_agnumber_t agno)
{
return to_perag(xfs_group_get(mp, agno, XG_TYPE_AG));
}
static inline struct xfs_perag *
xfs_perag_hold(
struct xfs_perag *pag)
{
return to_perag(xfs_group_hold(pag_group(pag)));
}
static inline void
xfs_perag_put(
struct xfs_perag *pag)
{
xfs_group_put(pag_group(pag));
}
/* Active AG references */
struct xfs_perag *xfs_perag_grab(struct xfs_mount *, xfs_agnumber_t);
void xfs_perag_rele(struct xfs_perag *pag);
static inline struct xfs_perag *
xfs_perag_grab(
struct xfs_mount *mp,
xfs_agnumber_t agno)
{
return to_perag(xfs_group_grab(mp, agno, XG_TYPE_AG));
}
static inline void
xfs_perag_rele(
struct xfs_perag *pag)
{
xfs_group_rele(pag_group(pag));
}
static inline struct xfs_perag *
xfs_perag_next_range(
struct xfs_mount *mp,
struct xfs_perag *pag,
xfs_agnumber_t start_agno,
xfs_agnumber_t end_agno)
{
return to_perag(xfs_group_next_range(mp, pag ? pag_group(pag) : NULL,
start_agno, end_agno, XG_TYPE_AG));
}
static inline struct xfs_perag *
xfs_perag_next_from(
struct xfs_mount *mp,
struct xfs_perag *pag,
xfs_agnumber_t start_agno)
{
return xfs_perag_next_range(mp, pag, start_agno, mp->m_sb.sb_agcount - 1);
}
static inline struct xfs_perag *
xfs_perag_next(
struct xfs_mount *mp,
struct xfs_perag *pag)
{
return xfs_perag_next_from(mp, pag, 0);
}
/*
* Per-ag geometry information and validation
@ -224,40 +275,6 @@ xfs_ag_contains_log(struct xfs_mount *mp, xfs_agnumber_t agno)
agno == XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart);
}
/*
* Perag iteration APIs
*/
static inline struct xfs_perag *
xfs_perag_next(
struct xfs_perag *pag,
xfs_agnumber_t *agno,
xfs_agnumber_t end_agno)
{
struct xfs_mount *mp = pag->pag_mount;
*agno = pag->pag_agno + 1;
xfs_perag_rele(pag);
while (*agno <= end_agno) {
pag = xfs_perag_grab(mp, *agno);
if (pag)
return pag;
(*agno)++;
}
return NULL;
}
#define for_each_perag_range(mp, agno, end_agno, pag) \
for ((pag) = xfs_perag_grab((mp), (agno)); \
(pag) != NULL; \
(pag) = xfs_perag_next((pag), &(agno), (end_agno)))
#define for_each_perag_from(mp, agno, pag) \
for_each_perag_range((mp), (agno), (mp)->m_sb.sb_agcount - 1, (pag))
#define for_each_perag(mp, agno, pag) \
(agno) = 0; \
for_each_perag_from((mp), (agno), (pag))
static inline struct xfs_perag *
xfs_perag_next_wrap(
struct xfs_perag *pag,
@ -266,9 +283,9 @@ xfs_perag_next_wrap(
xfs_agnumber_t restart_agno,
xfs_agnumber_t wrap_agno)
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
*agno = pag->pag_agno + 1;
*agno = pag_agno(pag) + 1;
xfs_perag_rele(pag);
while (*agno != stop_agno) {
if (*agno >= wrap_agno) {
@ -335,7 +352,7 @@ xfs_agbno_to_fsb(
struct xfs_perag *pag,
xfs_agblock_t agbno)
{
return XFS_AGB_TO_FSB(pag->pag_mount, pag->pag_agno, agbno);
return XFS_AGB_TO_FSB(pag_mount(pag), pag_agno(pag), agbno);
}
static inline xfs_daddr_t
@ -343,7 +360,7 @@ xfs_agbno_to_daddr(
struct xfs_perag *pag,
xfs_agblock_t agbno)
{
return XFS_AGB_TO_DADDR(pag->pag_mount, pag->pag_agno, agbno);
return XFS_AGB_TO_DADDR(pag_mount(pag), pag_agno(pag), agbno);
}
static inline xfs_ino_t
@ -351,7 +368,7 @@ xfs_agino_to_ino(
struct xfs_perag *pag,
xfs_agino_t agino)
{
return XFS_AGINO_TO_INO(pag->pag_mount, pag->pag_agno, agino);
return XFS_AGINO_TO_INO(pag_mount(pag), pag_agno(pag), agino);
}
#endif /* __LIBXFS_AG_H */
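
For reference, a sketch of how the new iterator helpers replace the old for_each_perag macros; the reference-counting contract follows the comment on xfs_group_next_range() later in this merge, and the loop body is a hypothetical example:

        struct xfs_perag        *pag = NULL;
        int                     error;

        /*
         * xfs_perag_next() releases the active reference on the previous
         * pag and grabs the next one, so a loop that runs to completion
         * leaves no reference behind.
         */
        while ((pag = xfs_perag_next(mp, pag)) != NULL) {
                error = do_something(pag);      /* hypothetical per-AG work */
                if (error) {
                        /* Early exit: drop the reference ourselves. */
                        xfs_perag_rele(pag);
                        break;
                }
        }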

fs/xfs/libxfs/xfs_ag_resv.c

@ -70,6 +70,7 @@ xfs_ag_resv_critical(
struct xfs_perag *pag,
enum xfs_ag_resv_type type)
{
struct xfs_mount *mp = pag_mount(pag);
xfs_extlen_t avail;
xfs_extlen_t orig;
@ -92,8 +93,8 @@ xfs_ag_resv_critical(
/* Critically low if less than 10% or max btree height remains. */
return XFS_TEST_ERROR(avail < orig / 10 ||
avail < pag->pag_mount->m_agbtree_maxlevels,
pag->pag_mount, XFS_ERRTAG_AG_RESV_CRITICAL);
avail < mp->m_agbtree_maxlevels,
mp, XFS_ERRTAG_AG_RESV_CRITICAL);
}
/*
@ -137,8 +138,8 @@ __xfs_ag_resv_free(
trace_xfs_ag_resv_free(pag, type, 0);
resv = xfs_perag_resv(pag, type);
if (pag->pag_agno == 0)
pag->pag_mount->m_ag_max_usable += resv->ar_asked;
if (pag_agno(pag) == 0)
pag_mount(pag)->m_ag_max_usable += resv->ar_asked;
/*
* RMAPBT blocks come from the AGFL and AGFL blocks are always
* considered "free", so whatever was reserved at mount time must be
@ -148,7 +149,7 @@ __xfs_ag_resv_free(
oldresv = resv->ar_orig_reserved;
else
oldresv = resv->ar_reserved;
xfs_add_fdblocks(pag->pag_mount, oldresv);
xfs_add_fdblocks(pag_mount(pag), oldresv);
resv->ar_reserved = 0;
resv->ar_asked = 0;
resv->ar_orig_reserved = 0;
@ -170,7 +171,7 @@ __xfs_ag_resv_init(
xfs_extlen_t ask,
xfs_extlen_t used)
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
struct xfs_ag_resv *resv;
int error;
xfs_extlen_t hidden_space;
@ -209,7 +210,7 @@ __xfs_ag_resv_init(
trace_xfs_ag_resv_init_error(pag, error, _RET_IP_);
xfs_warn(mp,
"Per-AG reservation for AG %u failed. Filesystem may run out of space.",
pag->pag_agno);
pag_agno(pag));
return error;
}
@ -219,7 +220,7 @@ __xfs_ag_resv_init(
* counter, we only make the adjustment for AG 0. This assumes that
* there aren't any AGs hungrier for per-AG reservation than AG 0.
*/
if (pag->pag_agno == 0)
if (pag_agno(pag) == 0)
mp->m_ag_max_usable -= ask;
resv = xfs_perag_resv(pag, type);
@ -237,7 +238,7 @@ xfs_ag_resv_init(
struct xfs_perag *pag,
struct xfs_trans *tp)
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
xfs_extlen_t ask;
xfs_extlen_t used;
int error = 0, error2;

fs/xfs/libxfs/xfs_alloc.c

@ -275,7 +275,7 @@ xfs_alloc_complain_bad_rec(
xfs_warn(mp,
"%sbt record corruption in AG %d detected at %pS!",
cur->bc_ops->name, cur->bc_ag.pag->pag_agno, fa);
cur->bc_ops->name, cur->bc_group->xg_gno, fa);
xfs_warn(mp,
"start block 0x%x block count 0x%x", irec->ar_startblock,
irec->ar_blockcount);
@ -303,7 +303,7 @@ xfs_alloc_get_rec(
return error;
xfs_alloc_btrec_to_irec(rec, &irec);
fa = xfs_alloc_check_irec(cur->bc_ag.pag, &irec);
fa = xfs_alloc_check_irec(to_perag(cur->bc_group), &irec);
if (fa)
return xfs_alloc_complain_bad_rec(cur, fa, &irec);
@ -331,7 +331,8 @@ xfs_alloc_compute_aligned(
bool busy;
/* Trim busy sections out of found extent */
busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);
busy = xfs_extent_busy_trim(pag_group(args->pag), args->minlen,
args->maxlen, &bno, &len, busy_gen);
/*
* If we have a largish extent that happens to start before min_agbno,
@ -539,7 +540,7 @@ static int
xfs_alloc_fixup_longest(
struct xfs_btree_cur *cnt_cur)
{
struct xfs_perag *pag = cnt_cur->bc_ag.pag;
struct xfs_perag *pag = to_perag(cnt_cur->bc_group);
struct xfs_buf *bp = cnt_cur->bc_ag.agbp;
struct xfs_agf *agf = bp->b_addr;
xfs_extlen_t longest = 0;
@ -799,7 +800,7 @@ xfs_agfl_verify(
* use it by using uncached buffers that don't have the perag attached
* so we can detect and avoid this problem.
*/
if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != pag_agno((bp->b_pag)))
return __this_address;
for (i = 0; i < xfs_agfl_size(mp); i++) {
@ -879,13 +880,12 @@ xfs_alloc_read_agfl(
struct xfs_trans *tp,
struct xfs_buf **bpp)
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
struct xfs_buf *bp;
int error;
error = xfs_trans_read_buf(
mp, tp, mp->m_ddev_targp,
XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGFL_DADDR(mp)),
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
XFS_AG_DADDR(mp, pag_agno(pag), XFS_AGFL_DADDR(mp)),
XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
if (xfs_metadata_is_sick(error))
xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
@ -1252,7 +1252,7 @@ xfs_alloc_ag_vextent_small(
if (fbno == NULLAGBLOCK)
goto out;
xfs_extent_busy_reuse(args->pag, fbno, 1,
xfs_extent_busy_reuse(pag_group(args->pag), fbno, 1,
(args->datatype & XFS_ALLOC_NOBUSY));
if (args->datatype & XFS_ALLOC_USERDATA) {
@ -1365,7 +1365,8 @@ xfs_alloc_ag_vextent_exact(
*/
tbno = fbno;
tlen = flen;
xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);
xfs_extent_busy_trim(pag_group(args->pag), args->minlen, args->maxlen,
&tbno, &tlen, &busy_gen);
/*
* Give up if the start of the extent is busy, or the freespace isn't
@ -1758,8 +1759,9 @@ restart:
* the allocation can be retried.
*/
trace_xfs_alloc_near_busy(args);
error = xfs_extent_busy_flush(args->tp, args->pag,
acur.busy_gen, alloc_flags);
error = xfs_extent_busy_flush(args->tp,
pag_group(args->pag), acur.busy_gen,
alloc_flags);
if (error)
goto out;
@ -1874,8 +1876,9 @@ restart:
* the allocation can be retried.
*/
trace_xfs_alloc_size_busy(args);
error = xfs_extent_busy_flush(args->tp, args->pag,
busy_gen, alloc_flags);
error = xfs_extent_busy_flush(args->tp,
pag_group(args->pag), busy_gen,
alloc_flags);
if (error)
goto error0;
@ -1973,8 +1976,9 @@ restart:
* the allocation can be retried.
*/
trace_xfs_alloc_size_busy(args);
error = xfs_extent_busy_flush(args->tp, args->pag,
busy_gen, alloc_flags);
error = xfs_extent_busy_flush(args->tp,
pag_group(args->pag), busy_gen,
alloc_flags);
if (error)
goto error0;
@ -2428,7 +2432,7 @@ xfs_alloc_longest_free_extent(
* reservations and AGFL rules in place, we can return this extent.
*/
if (pag->pagf_longest > delta)
return min_t(xfs_extlen_t, pag->pag_mount->m_ag_max_usable,
return min_t(xfs_extlen_t, pag_mount(pag)->m_ag_max_usable,
pag->pagf_longest - delta);
/* Otherwise, let the caller try for 1 block if there's space. */
@ -2611,7 +2615,7 @@ xfs_agfl_reset(
xfs_warn(mp,
"WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
"Please unmount and run xfs_repair.",
pag->pag_agno, pag->pagf_flcount);
pag_agno(pag), pag->pagf_flcount);
agf->agf_flfirst = 0;
agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
@ -3186,7 +3190,7 @@ xfs_validate_ag_length(
* use it by using uncached buffers that don't have the perag attached
* so we can detect and avoid this problem.
*/
if (bp->b_pag && seqno != bp->b_pag->pag_agno)
if (bp->b_pag && seqno != pag_agno(bp->b_pag))
return __this_address;
/*
@ -3355,13 +3359,13 @@ xfs_read_agf(
int flags,
struct xfs_buf **agfbpp)
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
int error;
trace_xfs_read_agf(pag);
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGF_DADDR(mp)),
XFS_AG_DADDR(mp, pag_agno(pag), XFS_AGF_DADDR(mp)),
XFS_FSS_TO_BB(mp, 1), flags, agfbpp, &xfs_agf_buf_ops);
if (xfs_metadata_is_sick(error))
xfs_ag_mark_sick(pag, XFS_SICK_AG_AGF);
@ -3384,6 +3388,7 @@ xfs_alloc_read_agf(
int flags,
struct xfs_buf **agfbpp)
{
struct xfs_mount *mp = pag_mount(pag);
struct xfs_buf *agfbp;
struct xfs_agf *agf;
int error;
@ -3410,7 +3415,7 @@ xfs_alloc_read_agf(
pag->pagf_cnt_level = be32_to_cpu(agf->agf_cnt_level);
pag->pagf_rmap_level = be32_to_cpu(agf->agf_rmap_level);
pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
if (xfs_agfl_needs_reset(pag->pag_mount, agf))
if (xfs_agfl_needs_reset(mp, agf))
set_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
else
clear_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
@ -3423,16 +3428,15 @@ xfs_alloc_read_agf(
* counter only tracks non-root blocks.
*/
allocbt_blks = pag->pagf_btreeblks;
if (xfs_has_rmapbt(pag->pag_mount))
if (xfs_has_rmapbt(mp))
allocbt_blks -= be32_to_cpu(agf->agf_rmap_blocks) - 1;
if (allocbt_blks > 0)
atomic64_add(allocbt_blks,
&pag->pag_mount->m_allocbt_blks);
atomic64_add(allocbt_blks, &mp->m_allocbt_blks);
set_bit(XFS_AGSTATE_AGF_INIT, &pag->pag_opstate);
}
#ifdef DEBUG
else if (!xfs_is_shutdown(pag->pag_mount)) {
else if (!xfs_is_shutdown(mp)) {
ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
@ -3614,8 +3618,8 @@ xfs_alloc_vextent_finish(
if (error)
goto out_drop_perag;
ASSERT(!xfs_extent_busy_search(args->pag, args->agbno,
args->len));
ASSERT(!xfs_extent_busy_search(pag_group(args->pag),
args->agbno, args->len));
}
xfs_ag_resv_alloc_extent(args->pag, args->resv, args);
@ -3650,7 +3654,7 @@ xfs_alloc_vextent_this_ag(
int error;
ASSERT(args->pag != NULL);
ASSERT(args->pag->pag_agno == agno);
ASSERT(pag_agno(args->pag) == agno);
args->agno = agno;
args->agbno = 0;
@ -3863,7 +3867,7 @@ xfs_alloc_vextent_exact_bno(
int error;
ASSERT(args->pag != NULL);
ASSERT(args->pag->pag_agno == XFS_FSB_TO_AGNO(mp, target));
ASSERT(pag_agno(args->pag) == XFS_FSB_TO_AGNO(mp, target));
args->agno = XFS_FSB_TO_AGNO(mp, target);
args->agbno = XFS_FSB_TO_AGBNO(mp, target);
@ -3902,7 +3906,7 @@ xfs_alloc_vextent_near_bno(
int error;
if (!needs_perag)
ASSERT(args->pag->pag_agno == XFS_FSB_TO_AGNO(mp, target));
ASSERT(pag_agno(args->pag) == XFS_FSB_TO_AGNO(mp, target));
args->agno = XFS_FSB_TO_AGNO(mp, target);
args->agbno = XFS_FSB_TO_AGBNO(mp, target);
@ -3939,7 +3943,7 @@ xfs_free_extent_fix_freelist(
memset(&args, 0, sizeof(struct xfs_alloc_arg));
args.tp = tp;
args.mp = tp->t_mountp;
args.agno = pag->pag_agno;
args.agno = pag_agno(pag);
args.pag = pag;
/*
@ -4013,7 +4017,7 @@ __xfs_free_extent(
if (skip_discard)
busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD;
xfs_extent_busy_insert(tp, pag, agbno, len, busy_flags);
xfs_extent_busy_insert(tp, pag_group(pag), agbno, len, busy_flags);
return 0;
err_release:
@ -4038,7 +4042,7 @@ xfs_alloc_query_range_helper(
xfs_failaddr_t fa;
xfs_alloc_btrec_to_irec(rec, &irec);
fa = xfs_alloc_check_irec(cur->bc_ag.pag, &irec);
fa = xfs_alloc_check_irec(to_perag(cur->bc_group), &irec);
if (fa)
return xfs_alloc_complain_bad_rec(cur, fa, &irec);

fs/xfs/libxfs/xfs_alloc.h

@ -248,7 +248,7 @@ struct xfs_extent_free_item {
uint64_t xefi_owner;
xfs_fsblock_t xefi_startblock;/* starting fs block number */
xfs_extlen_t xefi_blockcount;/* number of blocks in extent */
struct xfs_perag *xefi_pag;
struct xfs_group *xefi_group;
unsigned int xefi_flags;
enum xfs_ag_resv_type xefi_agresv;
};

fs/xfs/libxfs/xfs_alloc_btree.c

@ -28,7 +28,7 @@ xfs_bnobt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_bnobt_init_cursor(cur->bc_mp, cur->bc_tp, cur->bc_ag.agbp,
cur->bc_ag.pag);
to_perag(cur->bc_group));
}
STATIC struct xfs_btree_cur *
@ -36,29 +36,29 @@ xfs_cntbt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_cntbt_init_cursor(cur->bc_mp, cur->bc_tp, cur->bc_ag.agbp,
cur->bc_ag.pag);
to_perag(cur->bc_group));
}
STATIC void
xfs_allocbt_set_root(
struct xfs_btree_cur *cur,
const union xfs_btree_ptr *ptr,
int inc)
{
struct xfs_buf *agbp = cur->bc_ag.agbp;
struct xfs_agf *agf = agbp->b_addr;
struct xfs_perag *pag = to_perag(cur->bc_group);
struct xfs_buf *agbp = cur->bc_ag.agbp;
struct xfs_agf *agf = agbp->b_addr;
ASSERT(ptr->s != 0);
if (xfs_btree_is_bno(cur->bc_ops)) {
agf->agf_bno_root = ptr->s;
be32_add_cpu(&agf->agf_bno_level, inc);
cur->bc_ag.pag->pagf_bno_level += inc;
pag->pagf_bno_level += inc;
} else {
agf->agf_cnt_root = ptr->s;
be32_add_cpu(&agf->agf_cnt_level, inc);
cur->bc_ag.pag->pagf_cnt_level += inc;
pag->pagf_cnt_level += inc;
}
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
@ -75,7 +75,7 @@ xfs_allocbt_alloc_block(
xfs_agblock_t bno;
/* Allocate the new block from the freelist. If we can't, give up. */
error = xfs_alloc_get_freelist(cur->bc_ag.pag, cur->bc_tp,
error = xfs_alloc_get_freelist(to_perag(cur->bc_group), cur->bc_tp,
cur->bc_ag.agbp, &bno, 1);
if (error)
return error;
@ -86,7 +86,7 @@ xfs_allocbt_alloc_block(
}
atomic64_inc(&cur->bc_mp->m_allocbt_blks);
xfs_extent_busy_reuse(cur->bc_ag.pag, bno, 1, false);
xfs_extent_busy_reuse(cur->bc_group, bno, 1, false);
new->s = cpu_to_be32(bno);
@ -104,13 +104,13 @@ xfs_allocbt_free_block(
int error;
bno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp));
error = xfs_alloc_put_freelist(cur->bc_ag.pag, cur->bc_tp, agbp, NULL,
bno, 1);
error = xfs_alloc_put_freelist(to_perag(cur->bc_group), cur->bc_tp,
agbp, NULL, bno, 1);
if (error)
return error;
atomic64_dec(&cur->bc_mp->m_allocbt_blks);
xfs_extent_busy_insert(cur->bc_tp, agbp->b_pag, bno, 1,
xfs_extent_busy_insert(cur->bc_tp, pag_group(agbp->b_pag), bno, 1,
XFS_EXTENT_BUSY_SKIP_DISCARD);
return 0;
}
@ -178,7 +178,7 @@ xfs_allocbt_init_ptr_from_cur(
{
struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;
ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));
ASSERT(cur->bc_group->xg_gno == be32_to_cpu(agf->agf_seqno));
if (xfs_btree_is_bno(cur->bc_ops))
ptr->s = agf->agf_bno_root;
@ -492,7 +492,7 @@ xfs_bnobt_init_cursor(
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_bnobt_ops,
mp->m_alloc_maxlevels, xfs_allocbt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
cur->bc_group = xfs_group_hold(pag_group(pag));
cur->bc_ag.agbp = agbp;
if (agbp) {
struct xfs_agf *agf = agbp->b_addr;
@ -518,7 +518,7 @@ xfs_cntbt_init_cursor(
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_cntbt_ops,
mp->m_alloc_maxlevels, xfs_allocbt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
cur->bc_group = xfs_group_hold(pag_group(pag));
cur->bc_ag.agbp = agbp;
if (agbp) {
struct xfs_agf *agf = agbp->b_addr;

fs/xfs/libxfs/xfs_bmap.c

@ -3280,7 +3280,7 @@ xfs_bmap_longest_free_extent(
}
longest = xfs_alloc_longest_free_extent(pag,
xfs_alloc_min_freelist(pag->pag_mount, pag),
xfs_alloc_min_freelist(pag_mount(pag), pag),
xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
if (*blen < longest)
*blen = longest;

fs/xfs/libxfs/xfs_bmap.h

@ -248,7 +248,7 @@ struct xfs_bmap_intent {
enum xfs_bmap_intent_type bi_type;
int bi_whichfork;
struct xfs_inode *bi_owner;
struct xfs_perag *bi_pag;
struct xfs_group *bi_group;
struct xfs_bmbt_irec bi_bmap;
};

fs/xfs/libxfs/xfs_btree.c

@ -225,7 +225,7 @@ __xfs_btree_check_agblock(
struct xfs_buf *bp)
{
struct xfs_mount *mp = cur->bc_mp;
struct xfs_perag *pag = cur->bc_ag.pag;
struct xfs_perag *pag = to_perag(cur->bc_group);
xfs_failaddr_t fa;
xfs_agblock_t agbno;
@ -331,7 +331,7 @@ __xfs_btree_check_ptr(
return -EFSCORRUPTED;
break;
case XFS_BTREE_TYPE_AG:
if (!xfs_verify_agbno(cur->bc_ag.pag,
if (!xfs_verify_agbno(to_perag(cur->bc_group),
be32_to_cpu((&ptr->s)[index])))
return -EFSCORRUPTED;
break;
@ -372,7 +372,7 @@ xfs_btree_check_ptr(
case XFS_BTREE_TYPE_AG:
xfs_err(cur->bc_mp,
"AG %u: Corrupt %sbt pointer at level %d index %d.",
cur->bc_ag.pag->pag_agno, cur->bc_ops->name,
cur->bc_group->xg_gno, cur->bc_ops->name,
level, index);
break;
}
@ -523,20 +523,8 @@ xfs_btree_del_cursor(
ASSERT(!xfs_btree_is_bmap(cur->bc_ops) || cur->bc_bmap.allocated == 0 ||
xfs_is_shutdown(cur->bc_mp) || error != 0);
switch (cur->bc_ops->type) {
case XFS_BTREE_TYPE_AG:
if (cur->bc_ag.pag)
xfs_perag_put(cur->bc_ag.pag);
break;
case XFS_BTREE_TYPE_INODE:
/* nothing to do */
break;
case XFS_BTREE_TYPE_MEM:
if (cur->bc_mem.pag)
xfs_perag_put(cur->bc_mem.pag);
break;
}
if (cur->bc_group)
xfs_group_put(cur->bc_group);
kmem_cache_free(cur->bc_cache, cur);
}
@ -1017,21 +1005,22 @@ xfs_btree_readahead_agblock(
struct xfs_btree_block *block)
{
struct xfs_mount *mp = cur->bc_mp;
struct xfs_perag *pag = to_perag(cur->bc_group);
xfs_agblock_t left = be32_to_cpu(block->bb_u.s.bb_leftsib);
xfs_agblock_t right = be32_to_cpu(block->bb_u.s.bb_rightsib);
int rval = 0;
if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) {
xfs_buf_readahead(mp->m_ddev_targp,
xfs_agbno_to_daddr(cur->bc_ag.pag, left),
mp->m_bsize, cur->bc_ops->buf_ops);
xfs_agbno_to_daddr(pag, left), mp->m_bsize,
cur->bc_ops->buf_ops);
rval++;
}
if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) {
xfs_buf_readahead(mp->m_ddev_targp,
xfs_agbno_to_daddr(cur->bc_ag.pag, right),
mp->m_bsize, cur->bc_ops->buf_ops);
xfs_agbno_to_daddr(pag, right), mp->m_bsize,
cur->bc_ops->buf_ops);
rval++;
}
@ -1090,7 +1079,7 @@ xfs_btree_ptr_to_daddr(
switch (cur->bc_ops->type) {
case XFS_BTREE_TYPE_AG:
*daddr = xfs_agbno_to_daddr(cur->bc_ag.pag,
*daddr = xfs_agbno_to_daddr(to_perag(cur->bc_group),
be32_to_cpu(ptr->s));
break;
case XFS_BTREE_TYPE_INODE:
@ -1312,7 +1301,7 @@ xfs_btree_owner(
case XFS_BTREE_TYPE_INODE:
return cur->bc_ino.ip->i_ino;
case XFS_BTREE_TYPE_AG:
return cur->bc_ag.pag->pag_agno;
return cur->bc_group->xg_gno;
default:
ASSERT(0);
return 0;
@ -4744,7 +4733,7 @@ xfs_btree_agblock_v5hdr_verify(
return __this_address;
if (block->bb_u.s.bb_blkno != cpu_to_be64(xfs_buf_daddr(bp)))
return __this_address;
if (pag && be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
if (pag && be32_to_cpu(block->bb_u.s.bb_owner) != pag_agno(pag))
return __this_address;
return NULL;
}

fs/xfs/libxfs/xfs_btree.h

@ -254,6 +254,7 @@ struct xfs_btree_cur
union xfs_btree_irec bc_rec; /* current insert/search record value */
uint8_t bc_nlevels; /* number of levels in the tree */
uint8_t bc_maxlevels; /* maximum levels for this btree type */
struct xfs_group *bc_group;
/* per-type information */
union {
@ -264,13 +265,11 @@ struct xfs_btree_cur
struct xbtree_ifakeroot *ifake; /* for staging cursor */
} bc_ino;
struct {
struct xfs_perag *pag;
struct xfs_buf *agbp;
struct xbtree_afakeroot *afake; /* for staging cursor */
} bc_ag;
struct {
struct xfbtree *xfbtree;
struct xfs_perag *pag;
} bc_mem;
};

fs/xfs/libxfs/xfs_btree_mem.c

@ -57,10 +57,8 @@ xfbtree_dup_cursor(
ncur->bc_flags = cur->bc_flags;
ncur->bc_nlevels = cur->bc_nlevels;
ncur->bc_mem.xfbtree = cur->bc_mem.xfbtree;
if (cur->bc_mem.pag)
ncur->bc_mem.pag = xfs_perag_hold(cur->bc_mem.pag);
if (cur->bc_group)
ncur->bc_group = xfs_group_hold(cur->bc_group);
return ncur;
}

fs/xfs/libxfs/xfs_group.c (new file, 225 lines)

@ -0,0 +1,225 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018 Red Hat, Inc.
*/
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_extent_busy.h"
#include "xfs_group.h"
/*
* Groups can have passive and active references.
*
* For passive references the code freeing a group is responsible for cleaning
* up objects that hold the passive references (e.g. cached buffers).
* Routines manipulating passive references are xfs_group_get, xfs_group_hold
* and xfs_group_put.
*
* Active references are for short term access to the group for walking trees or
* accessing state. If a group is being shrunk or offlined, the lookup will fail
* to find that group and return NULL instead.
* Routines manipulating active references are xfs_group_grab and
* xfs_group_rele.
*/
struct xfs_group *
xfs_group_get(
struct xfs_mount *mp,
uint32_t index,
enum xfs_group_type type)
{
struct xfs_group *xg;
rcu_read_lock();
xg = xa_load(&mp->m_groups[type].xa, index);
if (xg) {
trace_xfs_group_get(xg, _RET_IP_);
ASSERT(atomic_read(&xg->xg_ref) >= 0);
atomic_inc(&xg->xg_ref);
}
rcu_read_unlock();
return xg;
}
struct xfs_group *
xfs_group_hold(
struct xfs_group *xg)
{
ASSERT(atomic_read(&xg->xg_ref) > 0 ||
atomic_read(&xg->xg_active_ref) > 0);
trace_xfs_group_hold(xg, _RET_IP_);
atomic_inc(&xg->xg_ref);
return xg;
}
void
xfs_group_put(
struct xfs_group *xg)
{
trace_xfs_group_put(xg, _RET_IP_);
ASSERT(atomic_read(&xg->xg_ref) > 0);
atomic_dec(&xg->xg_ref);
}
struct xfs_group *
xfs_group_grab(
struct xfs_mount *mp,
uint32_t index,
enum xfs_group_type type)
{
struct xfs_group *xg;
rcu_read_lock();
xg = xa_load(&mp->m_groups[type].xa, index);
if (xg) {
trace_xfs_group_grab(xg, _RET_IP_);
if (!atomic_inc_not_zero(&xg->xg_active_ref))
xg = NULL;
}
rcu_read_unlock();
return xg;
}
/*
 * Iterate to the next group.  Pass a %NULL @xg to start the iteration at
 * @start_index, else pass the previous group returned from this function.
 * The caller should break out of the loop when this returns %NULL.  If the
 * caller wants to break out of a loop that did not finish, it must itself
 * release the active reference to @xg using xfs_group_rele().
 */
struct xfs_group *
xfs_group_next_range(
struct xfs_mount *mp,
struct xfs_group *xg,
uint32_t start_index,
uint32_t end_index,
enum xfs_group_type type)
{
uint32_t index = start_index;
if (xg) {
index = xg->xg_gno + 1;
xfs_group_rele(xg);
}
if (index > end_index)
return NULL;
return xfs_group_grab(mp, index, type);
}
/*
 * Find the next group marked with @mark after @xg, or the first such group
 * if @xg is NULL.
 */
struct xfs_group *
xfs_group_grab_next_mark(
struct xfs_mount *mp,
struct xfs_group *xg,
xa_mark_t mark,
enum xfs_group_type type)
{
unsigned long index = 0;
if (xg) {
index = xg->xg_gno + 1;
xfs_group_rele(xg);
}
rcu_read_lock();
xg = xa_find(&mp->m_groups[type].xa, &index, ULONG_MAX, mark);
if (xg) {
trace_xfs_group_grab_next_tag(xg, _RET_IP_);
if (!atomic_inc_not_zero(&xg->xg_active_ref))
xg = NULL;
}
rcu_read_unlock();
return xg;
}
void
xfs_group_rele(
struct xfs_group *xg)
{
trace_xfs_group_rele(xg, _RET_IP_);
atomic_dec(&xg->xg_active_ref);
}
void
xfs_group_free(
struct xfs_mount *mp,
uint32_t index,
enum xfs_group_type type,
void (*uninit)(struct xfs_group *xg))
{
struct xfs_group *xg = xa_erase(&mp->m_groups[type].xa, index);
XFS_IS_CORRUPT(mp, atomic_read(&xg->xg_ref) != 0);
xfs_defer_drain_free(&xg->xg_intents_drain);
#ifdef __KERNEL__
kfree(xg->xg_busy_extents);
#endif
if (uninit)
uninit(xg);
/* drop the mount's active reference */
xfs_group_rele(xg);
XFS_IS_CORRUPT(mp, atomic_read(&xg->xg_active_ref) != 0);
kfree_rcu_mightsleep(xg);
}
int
xfs_group_insert(
struct xfs_mount *mp,
struct xfs_group *xg,
uint32_t index,
enum xfs_group_type type)
{
int error;
xg->xg_mount = mp;
xg->xg_gno = index;
xg->xg_type = type;
#ifdef __KERNEL__
xg->xg_busy_extents = xfs_extent_busy_alloc();
if (!xg->xg_busy_extents)
return -ENOMEM;
spin_lock_init(&xg->xg_state_lock);
xfs_hooks_init(&xg->xg_rmap_update_hooks);
#endif
xfs_defer_drain_init(&xg->xg_intents_drain);
/* Active ref owned by mount indicates group is online. */
atomic_set(&xg->xg_active_ref, 1);
error = xa_insert(&mp->m_groups[type].xa, index, xg, GFP_KERNEL);
if (error) {
WARN_ON_ONCE(error == -EBUSY);
goto out_drain;
}
return 0;
out_drain:
xfs_defer_drain_free(&xg->xg_intents_drain);
#ifdef __KERNEL__
kfree(xg->xg_busy_extents);
#endif
return error;
}
struct xfs_group *
xfs_group_get_by_fsb(
struct xfs_mount *mp,
xfs_fsblock_t fsbno,
enum xfs_group_type type)
{
return xfs_group_get(mp, xfs_fsb_to_gno(mp, fsbno, type), type);
}
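
Putting the pieces together with the xfs_ag.c changes above, the group lifecycle now looks roughly like this (a sketch; error handling abbreviated):

        /* Mount/growfs: set up type-specific state, then publish the group. */
        error = xfs_group_insert(mp, pag_group(pag), index, XG_TYPE_AG);
        if (error)
                goto out_buf_cache_destroy;     /* group never became visible */

        /* The mount now owns one active reference to the online group. */

        /* Unmount/shrink: unpublish, run the uninit callback, free via RCU. */
        xfs_group_free(mp, index, XG_TYPE_AG, xfs_perag_uninit);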

fs/xfs/libxfs/xfs_group.h (new file, 131 lines)

@ -0,0 +1,131 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2018 Red Hat, Inc.
*/
#ifndef __LIBXFS_GROUP_H
#define __LIBXFS_GROUP_H 1
struct xfs_group {
struct xfs_mount *xg_mount;
uint32_t xg_gno;
enum xfs_group_type xg_type;
atomic_t xg_ref; /* passive reference count */
atomic_t xg_active_ref; /* active reference count */
#ifdef __KERNEL__
/* -- kernel only structures below this line -- */
/*
* Track freed but not yet committed extents.
*/
struct xfs_extent_busy_tree *xg_busy_extents;
/*
* Bitsets of per-ag metadata that have been checked and/or are sick.
* Callers should hold xg_state_lock before accessing this field.
*/
uint16_t xg_checked;
uint16_t xg_sick;
spinlock_t xg_state_lock;
/*
* We use xfs_drain to track the number of deferred log intent items
* that have been queued (but not yet processed) so that waiters (e.g.
* scrub) will not lock resources when other threads are in the middle
* of processing a chain of intent items only to find momentary
* inconsistencies.
*/
struct xfs_defer_drain xg_intents_drain;
/*
* Hook to feed rmapbt updates to an active online repair.
*/
struct xfs_hooks xg_rmap_update_hooks;
#endif /* __KERNEL__ */
};
struct xfs_group *xfs_group_get(struct xfs_mount *mp, uint32_t index,
enum xfs_group_type type);
struct xfs_group *xfs_group_get_by_fsb(struct xfs_mount *mp,
xfs_fsblock_t fsbno, enum xfs_group_type type);
struct xfs_group *xfs_group_hold(struct xfs_group *xg);
void xfs_group_put(struct xfs_group *xg);
struct xfs_group *xfs_group_grab(struct xfs_mount *mp, uint32_t index,
enum xfs_group_type type);
struct xfs_group *xfs_group_next_range(struct xfs_mount *mp,
struct xfs_group *xg, uint32_t start_index, uint32_t end_index,
enum xfs_group_type type);
struct xfs_group *xfs_group_grab_next_mark(struct xfs_mount *mp,
struct xfs_group *xg, xa_mark_t mark, enum xfs_group_type type);
void xfs_group_rele(struct xfs_group *xg);
void xfs_group_free(struct xfs_mount *mp, uint32_t index,
enum xfs_group_type type, void (*uninit)(struct xfs_group *xg));
int xfs_group_insert(struct xfs_mount *mp, struct xfs_group *xg,
uint32_t index, enum xfs_group_type);
#define xfs_group_set_mark(_xg, _mark) \
xa_set_mark(&(_xg)->xg_mount->m_groups[(_xg)->xg_type].xa, \
(_xg)->xg_gno, (_mark))
#define xfs_group_clear_mark(_xg, _mark) \
xa_clear_mark(&(_xg)->xg_mount->m_groups[(_xg)->xg_type].xa, \
(_xg)->xg_gno, (_mark))
#define xfs_group_marked(_mp, _type, _mark) \
xa_marked(&(_mp)->m_groups[(_type)].xa, (_mark))
static inline xfs_agblock_t
xfs_group_max_blocks(
struct xfs_group *xg)
{
return xg->xg_mount->m_groups[xg->xg_type].blocks;
}
static inline xfs_fsblock_t
xfs_group_start_fsb(
struct xfs_group *xg)
{
return ((xfs_fsblock_t)xg->xg_gno) <<
xg->xg_mount->m_groups[xg->xg_type].blklog;
}
static inline xfs_fsblock_t
xfs_gbno_to_fsb(
struct xfs_group *xg,
xfs_agblock_t gbno)
{
return xfs_group_start_fsb(xg) | gbno;
}
static inline xfs_daddr_t
xfs_gbno_to_daddr(
struct xfs_group *xg,
xfs_agblock_t gbno)
{
struct xfs_mount *mp = xg->xg_mount;
uint32_t blocks = mp->m_groups[xg->xg_type].blocks;
return XFS_FSB_TO_BB(mp, (xfs_fsblock_t)xg->xg_gno * blocks + gbno);
}
static inline uint32_t
xfs_fsb_to_gno(
struct xfs_mount *mp,
xfs_fsblock_t fsbno,
enum xfs_group_type type)
{
if (!mp->m_groups[type].blklog)
return 0;
return fsbno >> mp->m_groups[type].blklog;
}
static inline xfs_agblock_t
xfs_fsb_to_gbno(
struct xfs_mount *mp,
xfs_fsblock_t fsbno,
enum xfs_group_type type)
{
return fsbno & mp->m_groups[type].blkmask;
}
#endif /* __LIBXFS_GROUP_H */
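
The geometry helpers at the end of this header split a filesystem block number into (group, block-in-group) with shift-and-mask arithmetic. A worked example with hypothetical geometry (blklog = 16, so blkmask = 0xffff and 65536 blocks per group):

        /*
         * fsbno 0x30007:
         *   xfs_fsb_to_gno()  = 0x30007 >> 16    = group 3
         *   xfs_fsb_to_gbno() = 0x30007 & 0xffff = block 7
         *
         * and back again:
         *   xfs_gbno_to_fsb(group 3, gbno 7) = (3 << 16) | 7 = 0x30007
         */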

fs/xfs/libxfs/xfs_health.h

@ -6,6 +6,8 @@
#ifndef __XFS_HEALTH_H__
#define __XFS_HEALTH_H__
struct xfs_group;
/*
* In-Core Filesystem Health Assessments
* =====================================
@ -197,10 +199,12 @@ void xfs_rt_measure_sickness(struct xfs_mount *mp, unsigned int *sick,
void xfs_agno_mark_sick(struct xfs_mount *mp, xfs_agnumber_t agno,
unsigned int mask);
void xfs_ag_mark_sick(struct xfs_perag *pag, unsigned int mask);
void xfs_ag_mark_corrupt(struct xfs_perag *pag, unsigned int mask);
void xfs_ag_mark_healthy(struct xfs_perag *pag, unsigned int mask);
void xfs_ag_measure_sickness(struct xfs_perag *pag, unsigned int *sick,
void xfs_group_mark_sick(struct xfs_group *xg, unsigned int mask);
#define xfs_ag_mark_sick(pag, mask) \
xfs_group_mark_sick(pag_group(pag), (mask))
void xfs_group_mark_corrupt(struct xfs_group *xg, unsigned int mask);
void xfs_group_mark_healthy(struct xfs_group *xg, unsigned int mask);
void xfs_group_measure_sickness(struct xfs_group *xg, unsigned int *sick,
unsigned int *checked);
void xfs_inode_mark_sick(struct xfs_inode *ip, unsigned int mask);
@ -227,22 +231,19 @@ xfs_fs_has_sickness(struct xfs_mount *mp, unsigned int mask)
}
static inline bool
xfs_rt_has_sickness(struct xfs_mount *mp, unsigned int mask)
xfs_group_has_sickness(
struct xfs_group *xg,
unsigned int mask)
{
unsigned int sick, checked;
unsigned int sick, checked;
xfs_rt_measure_sickness(mp, &sick, &checked);
return sick & mask;
}
static inline bool
xfs_ag_has_sickness(struct xfs_perag *pag, unsigned int mask)
{
unsigned int sick, checked;
xfs_ag_measure_sickness(pag, &sick, &checked);
xfs_group_measure_sickness(xg, &sick, &checked);
return sick & mask;
}
#define xfs_ag_has_sickness(pag, mask) \
xfs_group_has_sickness(pag_group(pag), (mask))
#define xfs_ag_is_healthy(pag) \
(!xfs_ag_has_sickness((pag), UINT_MAX))
static inline bool
xfs_inode_has_sickness(struct xfs_inode *ip, unsigned int mask)
@ -259,18 +260,6 @@ xfs_fs_is_healthy(struct xfs_mount *mp)
return !xfs_fs_has_sickness(mp, -1U);
}
static inline bool
xfs_rt_is_healthy(struct xfs_mount *mp)
{
return !xfs_rt_has_sickness(mp, -1U);
}
static inline bool
xfs_ag_is_healthy(struct xfs_perag *pag)
{
return !xfs_ag_has_sickness(pag, -1U);
}
static inline bool
xfs_inode_is_healthy(struct xfs_inode *ip)
{

fs/xfs/libxfs/xfs_ialloc.c

@ -142,7 +142,7 @@ xfs_inobt_complain_bad_rec(
xfs_warn(mp,
"%sbt record corruption in AG %d detected at %pS!",
cur->bc_ops->name, cur->bc_ag.pag->pag_agno, fa);
cur->bc_ops->name, cur->bc_group->xg_gno, fa);
xfs_warn(mp,
"start inode 0x%x, count 0x%x, free 0x%x freemask 0x%llx, holemask 0x%x",
irec->ir_startino, irec->ir_count, irec->ir_freecount,
@ -170,7 +170,7 @@ xfs_inobt_get_rec(
return error;
xfs_inobt_btrec_to_irec(mp, rec, irec);
fa = xfs_inobt_check_irec(cur->bc_ag.pag, irec);
fa = xfs_inobt_check_irec(to_perag(cur->bc_group), irec);
if (fa)
return xfs_inobt_complain_bad_rec(cur, fa, irec);
@ -275,8 +275,10 @@ xfs_check_agi_freecount(
}
} while (i == 1);
if (!xfs_is_shutdown(cur->bc_mp))
ASSERT(freecount == cur->bc_ag.pag->pagi_freecount);
if (!xfs_is_shutdown(cur->bc_mp)) {
ASSERT(freecount ==
to_perag(cur->bc_group)->pagi_freecount);
}
}
return 0;
}
@ -551,7 +553,7 @@ xfs_inobt_insert_sprec(
struct xfs_buf *agbp,
struct xfs_inobt_rec_incore *nrec) /* in/out: new/merged rec. */
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
struct xfs_btree_cur *cur;
int error;
int i;
@ -645,7 +647,7 @@ xfs_finobt_insert_sprec(
struct xfs_buf *agbp,
struct xfs_inobt_rec_incore *nrec) /* in/out: new rec. */
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
struct xfs_btree_cur *cur;
int error;
int i;
@ -880,7 +882,7 @@ sparse_alloc:
* rather than a linear progression to prevent the next generation
* number from being easily guessable.
*/
error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, pag->pag_agno,
error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, pag_agno(pag),
args.agbno, args.len, get_random_u32());
if (error)
@ -1071,7 +1073,7 @@ xfs_dialloc_check_ino(
if (error)
return -EAGAIN;
error = xfs_imap_to_bp(pag->pag_mount, tp, &imap, &bp);
error = xfs_imap_to_bp(pag_mount(pag), tp, &imap, &bp);
if (error)
return -EAGAIN;
@ -1122,7 +1124,7 @@ xfs_dialloc_ag_inobt(
/*
* If in the same AG as the parent, try to get near the parent.
*/
if (pagno == pag->pag_agno) {
if (pagno == pag_agno(pag)) {
int doneleft; /* done, to the left */
int doneright; /* done, to the right */
@ -1599,7 +1601,7 @@ xfs_dialloc_ag(
* parent. If so, find the closest available inode to the parent. If
* not, consider the agi hint or find the first free inode in the AG.
*/
if (pag->pag_agno == pagno)
if (pag_agno(pag) == pagno)
error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec);
else
error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec);
@ -2053,7 +2055,7 @@ xfs_difree_inobt(
struct xfs_icluster *xic,
struct xfs_inobt_rec_incore *orec)
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
struct xfs_agi *agi = agbp->b_addr;
struct xfs_btree_cur *cur;
struct xfs_inobt_rec_incore rec;
@ -2187,7 +2189,7 @@ xfs_difree_finobt(
xfs_agino_t agino,
struct xfs_inobt_rec_incore *ibtrec) /* inobt record */
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
struct xfs_btree_cur *cur;
struct xfs_inobt_rec_incore rec;
int offset = agino - ibtrec->ir_startino;
@ -2310,9 +2312,9 @@ xfs_difree(
/*
* Break up inode number into its components.
*/
if (pag->pag_agno != XFS_INO_TO_AGNO(mp, inode)) {
xfs_warn(mp, "%s: agno != pag->pag_agno (%d != %d).",
__func__, XFS_INO_TO_AGNO(mp, inode), pag->pag_agno);
if (pag_agno(pag) != XFS_INO_TO_AGNO(mp, inode)) {
xfs_warn(mp, "%s: agno != pag_agno(pag) (%d != %d).",
__func__, XFS_INO_TO_AGNO(mp, inode), pag_agno(pag));
ASSERT(0);
return -EINVAL;
}
@ -2373,7 +2375,7 @@ xfs_imap_lookup(
xfs_agblock_t *offset_agbno,
int flags)
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
struct xfs_inobt_rec_incore rec;
struct xfs_btree_cur *cur;
struct xfs_buf *agbp;
@ -2384,7 +2386,7 @@ xfs_imap_lookup(
if (error) {
xfs_alert(mp,
"%s: xfs_ialloc_read_agi() returned error %d, agno %d",
__func__, error, pag->pag_agno);
__func__, error, pag_agno(pag));
return error;
}
@ -2434,7 +2436,7 @@ xfs_imap(
struct xfs_imap *imap, /* location map structure */
uint flags) /* flags for inode btree lookup */
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
xfs_agblock_t agbno; /* block number of inode in the alloc group */
xfs_agino_t agino; /* inode number within alloc group */
xfs_agblock_t chunk_agbno; /* first block in inode chunk */
@ -2726,13 +2728,13 @@ xfs_read_agi(
xfs_buf_flags_t flags,
struct xfs_buf **agibpp)
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
int error;
trace_xfs_read_agi(pag);
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGI_DADDR(mp)),
XFS_AG_DADDR(mp, pag_agno(pag), XFS_AGI_DADDR(mp)),
XFS_FSS_TO_BB(mp, 1), flags, agibpp, &xfs_agi_buf_ops);
if (xfs_metadata_is_sick(error))
xfs_ag_mark_sick(pag, XFS_SICK_AG_AGI);
@ -2780,7 +2782,7 @@ xfs_ialloc_read_agi(
* we are in the middle of a forced shutdown.
*/
ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) ||
xfs_is_shutdown(pag->pag_mount));
xfs_is_shutdown(pag_mount(pag)));
if (agibpp)
*agibpp = agibp;
else
@ -2880,7 +2882,7 @@ xfs_ialloc_count_inodes_rec(
xfs_failaddr_t fa;
xfs_inobt_btrec_to_irec(cur->bc_mp, rec, &irec);
fa = xfs_inobt_check_irec(cur->bc_ag.pag, &irec);
fa = xfs_inobt_check_irec(to_perag(cur->bc_group), &irec);
if (fa)
return xfs_inobt_complain_bad_rec(cur, fa, &irec);
@ -3119,13 +3121,13 @@ xfs_ialloc_check_shrink(
int has;
int error;
if (!xfs_has_sparseinodes(pag->pag_mount))
if (!xfs_has_sparseinodes(pag_mount(pag)))
return 0;
cur = xfs_inobt_init_cursor(pag, tp, agibp);
/* Look up the inobt record that would correspond to the new EOFS. */
agino = XFS_AGB_TO_AGINO(pag->pag_mount, new_length);
agino = XFS_AGB_TO_AGINO(pag_mount(pag), new_length);
error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has);
if (error || !has)
goto out;

fs/xfs/libxfs/xfs_ialloc_btree.c

@ -37,7 +37,7 @@ STATIC struct xfs_btree_cur *
xfs_inobt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_inobt_init_cursor(cur->bc_ag.pag, cur->bc_tp,
return xfs_inobt_init_cursor(to_perag(cur->bc_group), cur->bc_tp,
cur->bc_ag.agbp);
}
@ -45,7 +45,7 @@ STATIC struct xfs_btree_cur *
xfs_finobt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_finobt_init_cursor(cur->bc_ag.pag, cur->bc_tp,
return xfs_finobt_init_cursor(to_perag(cur->bc_group), cur->bc_tp,
cur->bc_ag.agbp);
}
@ -112,7 +112,7 @@ __xfs_inobt_alloc_block(
memset(&args, 0, sizeof(args));
args.tp = cur->bc_tp;
args.mp = cur->bc_mp;
args.pag = cur->bc_ag.pag;
args.pag = to_perag(cur->bc_group);
args.oinfo = XFS_RMAP_OINFO_INOBT;
args.minlen = 1;
args.maxlen = 1;
@ -248,7 +248,7 @@ xfs_inobt_init_ptr_from_cur(
{
struct xfs_agi *agi = cur->bc_ag.agbp->b_addr;
ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno));
ASSERT(cur->bc_group->xg_gno == be32_to_cpu(agi->agi_seqno));
ptr->s = agi->agi_root;
}
@ -260,7 +260,8 @@ xfs_finobt_init_ptr_from_cur(
{
struct xfs_agi *agi = cur->bc_ag.agbp->b_addr;
ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno));
ASSERT(cur->bc_group->xg_gno == be32_to_cpu(agi->agi_seqno));
ptr->s = agi->agi_free_root;
}
@ -478,12 +479,12 @@ xfs_inobt_init_cursor(
struct xfs_trans *tp,
struct xfs_buf *agbp)
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
struct xfs_btree_cur *cur;
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_inobt_ops,
M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
cur->bc_group = xfs_group_hold(pag_group(pag));
cur->bc_ag.agbp = agbp;
if (agbp) {
struct xfs_agi *agi = agbp->b_addr;
@ -504,12 +505,12 @@ xfs_finobt_init_cursor(
struct xfs_trans *tp,
struct xfs_buf *agbp)
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
struct xfs_btree_cur *cur;
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_finobt_ops,
M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
cur->bc_group = xfs_group_hold(pag_group(pag));
cur->bc_ag.agbp = agbp;
if (agbp) {
struct xfs_agi *agi = agbp->b_addr;
@ -715,7 +716,7 @@ static xfs_extlen_t
xfs_inobt_max_size(
struct xfs_perag *pag)
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
xfs_agblock_t agblocks = pag->block_count;
/* Bail out if we're uninitialized, which can happen in mkfs. */
@ -727,7 +728,7 @@ xfs_inobt_max_size(
* never be available for the kinds of things that would require btree
* expansion. We therefore can pretend the space isn't there.
*/
if (xfs_ag_contains_log(mp, pag->pag_agno))
if (xfs_ag_contains_log(mp, pag_agno(pag)))
agblocks -= mp->m_sb.sb_logblocks;
return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr,
@ -791,10 +792,10 @@ xfs_finobt_calc_reserves(
xfs_extlen_t tree_len = 0;
int error;
if (!xfs_has_finobt(pag->pag_mount))
if (!xfs_has_finobt(pag_mount(pag)))
return 0;
if (xfs_has_inobtcounts(pag->pag_mount))
if (xfs_has_inobtcounts(pag_mount(pag)))
error = xfs_finobt_read_blocks(pag, tp, &tree_len);
else
error = xfs_finobt_count_blocks(pag, tp, &tree_len);

fs/xfs/libxfs/xfs_refcount.c

@ -154,7 +154,7 @@ xfs_refcount_complain_bad_rec(
xfs_warn(mp,
"Refcount BTree record corruption in AG %d detected at %pS!",
cur->bc_ag.pag->pag_agno, fa);
cur->bc_group->xg_gno, fa);
xfs_warn(mp,
"Start block 0x%x, block count 0x%x, references 0x%x",
irec->rc_startblock, irec->rc_blockcount, irec->rc_refcount);
@ -180,7 +180,7 @@ xfs_refcount_get_rec(
return error;
xfs_refcount_btrec_to_irec(rec, irec);
fa = xfs_refcount_check_irec(cur->bc_ag.pag, irec);
fa = xfs_refcount_check_irec(to_perag(cur->bc_group), irec);
if (fa)
return xfs_refcount_complain_bad_rec(cur, fa, irec);
@ -1154,7 +1154,7 @@ xfs_refcount_adjust_extents(
goto out_error;
}
} else {
fsbno = xfs_agbno_to_fsb(cur->bc_ag.pag,
fsbno = xfs_agbno_to_fsb(to_perag(cur->bc_group),
tmp.rc_startblock);
error = xfs_free_extent_later(cur->bc_tp, fsbno,
tmp.rc_blockcount, NULL,
@ -1216,7 +1216,7 @@ xfs_refcount_adjust_extents(
}
goto advloop;
} else {
fsbno = xfs_agbno_to_fsb(cur->bc_ag.pag,
fsbno = xfs_agbno_to_fsb(to_perag(cur->bc_group),
ext.rc_startblock);
error = xfs_free_extent_later(cur->bc_tp, fsbno,
ext.rc_blockcount, NULL,
@ -1310,7 +1310,7 @@ xfs_refcount_continue_op(
xfs_agblock_t new_agbno)
{
struct xfs_mount *mp = cur->bc_mp;
struct xfs_perag *pag = cur->bc_ag.pag;
struct xfs_perag *pag = to_perag(cur->bc_group);
if (XFS_IS_CORRUPT(mp, !xfs_verify_agbext(pag, new_agbno,
ri->ri_blockcount))) {
@ -1321,7 +1321,7 @@ xfs_refcount_continue_op(
ri->ri_startblock = xfs_agbno_to_fsb(pag, new_agbno);
ASSERT(xfs_verify_fsbext(mp, ri->ri_startblock, ri->ri_blockcount));
ASSERT(pag->pag_agno == XFS_FSB_TO_AGNO(mp, ri->ri_startblock));
ASSERT(pag_agno(pag) == XFS_FSB_TO_AGNO(mp, ri->ri_startblock));
return 0;
}
@ -1358,7 +1358,7 @@ xfs_refcount_finish_one(
* If we haven't gotten a cursor or the cursor AG doesn't match
* the startblock, get one now.
*/
if (rcur != NULL && rcur->bc_ag.pag != ri->ri_pag) {
if (rcur != NULL && rcur->bc_group != ri->ri_group) {
nr_ops = rcur->bc_refc.nr_ops;
shape_changes = rcur->bc_refc.shape_changes;
xfs_btree_del_cursor(rcur, 0);
@ -1366,13 +1366,14 @@ xfs_refcount_finish_one(
*pcur = NULL;
}
if (rcur == NULL) {
error = xfs_alloc_read_agf(ri->ri_pag, tp,
struct xfs_perag *pag = to_perag(ri->ri_group);
error = xfs_alloc_read_agf(pag, tp,
XFS_ALLOC_FLAG_FREEING, &agbp);
if (error)
return error;
*pcur = rcur = xfs_refcountbt_init_cursor(mp, tp, agbp,
ri->ri_pag);
*pcur = rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag);
rcur->bc_refc.nr_ops = nr_ops;
rcur->bc_refc.shape_changes = shape_changes;
}
@ -1878,7 +1879,8 @@ xfs_refcount_recover_extent(
INIT_LIST_HEAD(&rr->rr_list);
xfs_refcount_btrec_to_irec(rec, &rr->rr_rrec);
if (xfs_refcount_check_irec(cur->bc_ag.pag, &rr->rr_rrec) != NULL ||
if (xfs_refcount_check_irec(to_perag(cur->bc_group), &rr->rr_rrec) !=
NULL ||
XFS_IS_CORRUPT(cur->bc_mp,
rr->rr_rrec.rc_domain != XFS_REFC_DOMAIN_COW)) {
xfs_btree_mark_sick(cur);
@ -2026,7 +2028,7 @@ xfs_refcount_query_range_helper(
xfs_failaddr_t fa;
xfs_refcount_btrec_to_irec(rec, &irec);
fa = xfs_refcount_check_irec(cur->bc_ag.pag, &irec);
fa = xfs_refcount_check_irec(to_perag(cur->bc_group), &irec);
if (fa)
return xfs_refcount_complain_bad_rec(cur, fa, &irec);

fs/xfs/libxfs/xfs_refcount.h

@ -56,7 +56,7 @@ enum xfs_refcount_intent_type {
struct xfs_refcount_intent {
struct list_head ri_list;
struct xfs_perag *ri_pag;
struct xfs_group *ri_group;
enum xfs_refcount_intent_type ri_type;
xfs_extlen_t ri_blockcount;
xfs_fsblock_t ri_startblock;

fs/xfs/libxfs/xfs_refcount_btree.c

@ -30,7 +30,7 @@ xfs_refcountbt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
cur->bc_ag.agbp, cur->bc_ag.pag);
cur->bc_ag.agbp, to_perag(cur->bc_group));
}
STATIC void
@ -68,7 +68,7 @@ xfs_refcountbt_alloc_block(
memset(&args, 0, sizeof(args));
args.tp = cur->bc_tp;
args.mp = cur->bc_mp;
args.pag = cur->bc_ag.pag;
args.pag = to_perag(cur->bc_group);
args.oinfo = XFS_RMAP_OINFO_REFC;
args.minlen = args.maxlen = args.prod = 1;
args.resv = XFS_AG_RESV_METADATA;
@ -81,7 +81,7 @@ xfs_refcountbt_alloc_block(
*stat = 0;
return 0;
}
ASSERT(args.agno == cur->bc_ag.pag->pag_agno);
ASSERT(args.agno == cur->bc_group->xg_gno);
ASSERT(args.len == 1);
new->s = cpu_to_be32(args.agbno);
@ -169,7 +169,7 @@ xfs_refcountbt_init_ptr_from_cur(
{
struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;
ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));
ASSERT(cur->bc_group->xg_gno == be32_to_cpu(agf->agf_seqno));
ptr->s = agf->agf_refcount_root;
}
@ -361,11 +361,11 @@ xfs_refcountbt_init_cursor(
{
struct xfs_btree_cur *cur;
ASSERT(pag->pag_agno < mp->m_sb.sb_agcount);
ASSERT(pag_agno(pag) < mp->m_sb.sb_agcount);
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_refcountbt_ops,
mp->m_refc_maxlevels, xfs_refcountbt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
cur->bc_group = xfs_group_hold(pag_group(pag));
cur->bc_refc.nr_ops = 0;
cur->bc_refc.shape_changes = 0;
cur->bc_ag.agbp = agbp;
@ -514,7 +514,7 @@ xfs_refcountbt_calc_reserves(
* never be available for the kinds of things that would require btree
* expansion. We therefore can pretend the space isn't there.
*/
if (xfs_ag_contains_log(mp, pag->pag_agno))
if (xfs_ag_contains_log(mp, pag_agno(pag)))
agblocks -= mp->m_sb.sb_logblocks;
*ask += xfs_refcountbt_max_size(mp, agblocks);

fs/xfs/libxfs/xfs_rmap.c

@ -213,7 +213,7 @@ xfs_rmap_check_irec(
struct xfs_perag *pag,
const struct xfs_rmap_irec *irec)
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
bool is_inode;
bool is_unwritten;
bool is_bmbt;
@ -269,9 +269,7 @@ xfs_rmap_check_btrec(
struct xfs_btree_cur *cur,
const struct xfs_rmap_irec *irec)
{
if (xfs_btree_is_mem_rmap(cur->bc_ops))
return xfs_rmap_check_irec(cur->bc_mem.pag, irec);
return xfs_rmap_check_irec(cur->bc_ag.pag, irec);
return xfs_rmap_check_irec(to_perag(cur->bc_group), irec);
}
static inline int
@ -288,7 +286,7 @@ xfs_rmap_complain_bad_rec(
else
xfs_warn(mp,
"Reverse Mapping BTree record corruption in AG %d detected at %pS!",
cur->bc_ag.pag->pag_agno, fa);
cur->bc_group->xg_gno, fa);
xfs_warn(mp,
"Owner 0x%llx, flags 0x%x, start block 0x%x block count 0x%x",
irec->rm_owner, irec->rm_flags, irec->rm_startblock,
@ -835,7 +833,7 @@ xfs_rmap_hook_enable(void)
static inline void
xfs_rmap_update_hook(
struct xfs_trans *tp,
struct xfs_perag *pag,
struct xfs_group *xg,
enum xfs_rmap_intent_type op,
xfs_agblock_t startblock,
xfs_extlen_t blockcount,
@ -850,27 +848,27 @@ xfs_rmap_update_hook(
.oinfo = *oinfo, /* struct copy */
};
if (pag)
xfs_hooks_call(&pag->pag_rmap_update_hooks, op, &p);
if (xg)
xfs_hooks_call(&xg->xg_rmap_update_hooks, op, &p);
}
}
/* Call the specified function during a reverse mapping update. */
int
xfs_rmap_hook_add(
struct xfs_perag *pag,
struct xfs_group *xg,
struct xfs_rmap_hook *hook)
{
return xfs_hooks_add(&pag->pag_rmap_update_hooks, &hook->rmap_hook);
return xfs_hooks_add(&xg->xg_rmap_update_hooks, &hook->rmap_hook);
}
/* Stop calling the specified function during a reverse mapping update. */
void
xfs_rmap_hook_del(
struct xfs_perag *pag,
struct xfs_group *xg,
struct xfs_rmap_hook *hook)
{
xfs_hooks_del(&pag->pag_rmap_update_hooks, &hook->rmap_hook);
xfs_hooks_del(&xg->xg_rmap_update_hooks, &hook->rmap_hook);
}
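A condensed sketch of the caller side of these hook entry points, lifted from the online-repair hunks later in this diff (rr, sc, and xrep_rmapbt_live_update appear there; the ellipsis elides the live scan itself). The point is that a hook now attaches to the AG's embedded xfs_group rather than to the perag:

        xfs_rmap_hook_setup(&rr->rhook, xrep_rmapbt_live_update);
        error = xfs_rmap_hook_add(pag_group(sc->sa.pag), &rr->rhook);
        if (error)
                goto out_iscan;
        /* ... run the live scan ... */
        xfs_rmap_hook_del(pag_group(sc->sa.pag), &rr->rhook);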
/* Configure rmap update hook functions. */
@ -905,7 +903,8 @@ xfs_rmap_free(
return 0;
cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
xfs_rmap_update_hook(tp, pag, XFS_RMAP_UNMAP, bno, len, false, oinfo);
xfs_rmap_update_hook(tp, pag_group(pag), XFS_RMAP_UNMAP, bno, len,
false, oinfo);
error = xfs_rmap_unmap(cur, bno, len, false, oinfo);
xfs_btree_del_cursor(cur, error);
@ -1149,7 +1148,8 @@ xfs_rmap_alloc(
return 0;
cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
xfs_rmap_update_hook(tp, pag, XFS_RMAP_MAP, bno, len, false, oinfo);
xfs_rmap_update_hook(tp, pag_group(pag), XFS_RMAP_MAP, bno, len, false,
oinfo);
error = xfs_rmap_map(cur, bno, len, false, oinfo);
xfs_btree_del_cursor(cur, error);
@ -2586,28 +2586,30 @@ xfs_rmap_finish_one(
* If we haven't gotten a cursor or the cursor AG doesn't match
* the startblock, get one now.
*/
if (rcur != NULL && rcur->bc_ag.pag != ri->ri_pag) {
if (rcur != NULL && rcur->bc_group != ri->ri_group) {
xfs_btree_del_cursor(rcur, 0);
rcur = NULL;
*pcur = NULL;
}
if (rcur == NULL) {
struct xfs_perag *pag = to_perag(ri->ri_group);
/*
* Refresh the freelist before we start changing the
* rmapbt, because a shape change could cause us to
* allocate blocks.
*/
error = xfs_free_extent_fix_freelist(tp, ri->ri_pag, &agbp);
error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
if (error) {
xfs_ag_mark_sick(ri->ri_pag, XFS_SICK_AG_AGFL);
xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
return error;
}
if (XFS_IS_CORRUPT(tp->t_mountp, !agbp)) {
xfs_ag_mark_sick(ri->ri_pag, XFS_SICK_AG_AGFL);
xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
return -EFSCORRUPTED;
}
*pcur = rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, ri->ri_pag);
*pcur = rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
}
xfs_rmap_ino_owner(&oinfo, ri->ri_owner, ri->ri_whichfork,
@ -2620,7 +2622,7 @@ xfs_rmap_finish_one(
if (error)
return error;
xfs_rmap_update_hook(tp, ri->ri_pag, ri->ri_type, bno,
xfs_rmap_update_hook(tp, ri->ri_group, ri->ri_type, bno,
ri->ri_bmap.br_blockcount, unwritten, &oinfo);
return 0;
}

View File

@ -173,7 +173,7 @@ struct xfs_rmap_intent {
int ri_whichfork;
uint64_t ri_owner;
struct xfs_bmbt_irec ri_bmap;
struct xfs_perag *ri_pag;
struct xfs_group *ri_group;
};
/* functions for updating the rmapbt based on bmbt map/unmap operations */
@ -264,8 +264,8 @@ struct xfs_rmap_hook {
void xfs_rmap_hook_disable(void);
void xfs_rmap_hook_enable(void);
int xfs_rmap_hook_add(struct xfs_perag *pag, struct xfs_rmap_hook *hook);
void xfs_rmap_hook_del(struct xfs_perag *pag, struct xfs_rmap_hook *hook);
int xfs_rmap_hook_add(struct xfs_group *xg, struct xfs_rmap_hook *hook);
void xfs_rmap_hook_del(struct xfs_group *xg, struct xfs_rmap_hook *hook);
void xfs_rmap_hook_setup(struct xfs_rmap_hook *hook, notifier_fn_t mod_fn);
#endif

View File

@ -57,7 +57,7 @@ xfs_rmapbt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_rmapbt_init_cursor(cur->bc_mp, cur->bc_tp,
cur->bc_ag.agbp, cur->bc_ag.pag);
cur->bc_ag.agbp, to_perag(cur->bc_group));
}
STATIC void
@ -66,14 +66,15 @@ xfs_rmapbt_set_root(
const union xfs_btree_ptr *ptr,
int inc)
{
struct xfs_buf *agbp = cur->bc_ag.agbp;
struct xfs_agf *agf = agbp->b_addr;
struct xfs_buf *agbp = cur->bc_ag.agbp;
struct xfs_agf *agf = agbp->b_addr;
struct xfs_perag *pag = to_perag(cur->bc_group);
ASSERT(ptr->s != 0);
agf->agf_rmap_root = ptr->s;
be32_add_cpu(&agf->agf_rmap_level, inc);
cur->bc_ag.pag->pagf_rmap_level += inc;
pag->pagf_rmap_level += inc;
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
@ -87,7 +88,7 @@ xfs_rmapbt_alloc_block(
{
struct xfs_buf *agbp = cur->bc_ag.agbp;
struct xfs_agf *agf = agbp->b_addr;
struct xfs_perag *pag = cur->bc_ag.pag;
struct xfs_perag *pag = to_perag(cur->bc_group);
struct xfs_alloc_arg args = { .len = 1 };
int error;
xfs_agblock_t bno;
@ -102,7 +103,7 @@ xfs_rmapbt_alloc_block(
return 0;
}
xfs_extent_busy_reuse(pag, bno, 1, false);
xfs_extent_busy_reuse(pag_group(pag), bno, 1, false);
new->s = cpu_to_be32(bno);
be32_add_cpu(&agf->agf_rmap_blocks, 1);
@ -125,7 +126,7 @@ xfs_rmapbt_free_block(
{
struct xfs_buf *agbp = cur->bc_ag.agbp;
struct xfs_agf *agf = agbp->b_addr;
struct xfs_perag *pag = cur->bc_ag.pag;
struct xfs_perag *pag = to_perag(cur->bc_group);
xfs_agblock_t bno;
int error;
@ -136,7 +137,7 @@ xfs_rmapbt_free_block(
if (error)
return error;
xfs_extent_busy_insert(cur->bc_tp, pag, bno, 1,
xfs_extent_busy_insert(cur->bc_tp, pag_group(pag), bno, 1,
XFS_EXTENT_BUSY_SKIP_DISCARD);
xfs_ag_resv_free_extent(pag, XFS_AG_RESV_RMAPBT, NULL, 1);
@ -227,7 +228,7 @@ xfs_rmapbt_init_ptr_from_cur(
{
struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;
ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));
ASSERT(cur->bc_group->xg_gno == be32_to_cpu(agf->agf_seqno));
ptr->s = agf->agf_rmap_root;
}
@ -538,7 +539,7 @@ xfs_rmapbt_init_cursor(
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rmapbt_ops,
mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
cur->bc_group = xfs_group_hold(pag_group(pag));
cur->bc_ag.agbp = agbp;
if (agbp) {
struct xfs_agf *agf = agbp->b_addr;
@ -647,14 +648,13 @@ xfs_rmapbt_mem_cursor(
struct xfbtree *xfbt)
{
struct xfs_btree_cur *cur;
struct xfs_mount *mp = pag->pag_mount;
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rmapbt_mem_ops,
cur = xfs_btree_alloc_cursor(pag_mount(pag), tp, &xfs_rmapbt_mem_ops,
xfs_rmapbt_maxlevels_ondisk(), xfs_rmapbt_cur_cache);
cur->bc_mem.xfbtree = xfbt;
cur->bc_nlevels = xfbt->nlevels;
cur->bc_mem.pag = xfs_perag_hold(pag);
cur->bc_group = xfs_group_hold(pag_group(pag));
return cur;
}
@ -863,7 +863,7 @@ xfs_rmapbt_calc_reserves(
* never be available for the kinds of things that would require btree
* expansion. We therefore can pretend the space isn't there.
*/
if (xfs_ag_contains_log(mp, pag->pag_agno))
if (xfs_ag_contains_log(mp, pag_agno(pag)))
agblocks -= mp->m_sb.sb_logblocks;
/* Reserve 1% of the AG or enough for 1 block per record. */

View File

@ -1002,6 +1002,8 @@ xfs_sb_mount_common(
struct xfs_mount *mp,
struct xfs_sb *sbp)
{
struct xfs_groups *ags = &mp->m_groups[XG_TYPE_AG];
mp->m_agfrotor = 0;
atomic_set(&mp->m_agirotor, 0);
mp->m_maxagi = mp->m_sb.sb_agcount;
@ -1012,6 +1014,11 @@ xfs_sb_mount_common(
mp->m_blockmask = sbp->sb_blocksize - 1;
mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
mp->m_blockwmask = mp->m_blockwsize - 1;
ags->blocks = mp->m_sb.sb_agblocks;
ags->blklog = mp->m_sb.sb_agblklog;
ags->blkmask = xfs_mask32lo(mp->m_sb.sb_agblklog);
xfs_mount_sb_set_rextsize(mp, sbp);
mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, true);
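A short illustration of what the cached geometry buys: with blklog equal to sb_agblklog and blkmask its low-bits mask, a linear block number splits into a group index and an in-group offset with one shift and one mask, exactly as the XFS_FSB_TO_AGNO()/XFS_FSB_TO_AGBNO() macros have always decoded AG numbers. The helper names and plain integer types below are illustrative only, not part of this patch:

        static inline uint32_t xg_fsbno_to_gno(const struct xfs_groups *g,
                        uint64_t fsbno)
        {
                return fsbno >> g->blklog;      /* group index */
        }

        static inline uint32_t xg_fsbno_to_gbno(const struct xfs_groups *g,
                        uint64_t fsbno)
        {
                return fsbno & g->blkmask;      /* block offset in the group */
        }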
@ -1123,18 +1130,17 @@ int
xfs_update_secondary_sbs(
struct xfs_mount *mp)
{
struct xfs_perag *pag;
xfs_agnumber_t agno = 1;
struct xfs_perag *pag = NULL;
int saved_error = 0;
int error = 0;
LIST_HEAD (buffer_list);
/* update secondary superblocks. */
for_each_perag_from(mp, agno, pag) {
while ((pag = xfs_perag_next_from(mp, pag, 1))) {
struct xfs_buf *bp;
error = xfs_buf_get(mp->m_ddev_targp,
XFS_AG_DADDR(mp, pag->pag_agno, XFS_SB_DADDR),
XFS_AG_DADDR(mp, pag_agno(pag), XFS_SB_DADDR),
XFS_FSS_TO_BB(mp, 1), &bp);
/*
* If we get an error reading or writing alternate superblocks,
@ -1146,7 +1152,7 @@ xfs_update_secondary_sbs(
if (error) {
xfs_warn(mp,
"error allocating secondary superblock for ag %d",
pag->pag_agno);
pag_agno(pag));
if (!saved_error)
saved_error = error;
continue;
@ -1160,26 +1166,22 @@ xfs_update_secondary_sbs(
xfs_buf_relse(bp);
/* don't hold too many buffers at once */
if (agno % 16)
if (pag_agno(pag) % 16)
continue;
error = xfs_buf_delwri_submit(&buffer_list);
if (error) {
xfs_warn(mp,
"write error %d updating a secondary superblock near ag %d",
error, pag->pag_agno);
error, pag_agno(pag));
if (!saved_error)
saved_error = error;
continue;
}
}
error = xfs_buf_delwri_submit(&buffer_list);
if (error) {
xfs_warn(mp,
"write error %d updating a secondary superblock near ag %d",
error, agno);
}
if (error)
xfs_warn(mp, "error %d writing secondary superblocks", error);
return saved_error ? saved_error : error;
}

View File

@ -170,13 +170,12 @@ xfs_icount_range(
unsigned long long *max)
{
unsigned long long nr_inos = 0;
struct xfs_perag *pag;
xfs_agnumber_t agno;
struct xfs_perag *pag = NULL;
/* root, rtbitmap, rtsum all live in the first chunk */
*min = XFS_INODES_PER_CHUNK;
for_each_perag(mp, agno, pag)
while ((pag = xfs_perag_next(mp, pag)))
nr_inos += pag->agino_max - pag->agino_min + 1;
*max = nr_inos;
}
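This is the new iteration idiom used throughout the series: seed the cursor with NULL and let xfs_perag_next() (or the _from/_range variants seen elsewhere in this diff) manage the active references. A minimal sketch; do_something() is illustrative, and on early exit the caller drops the reference itself, as the bmap repair hunks below do with xfs_perag_rele():

        struct xfs_perag *pag = NULL;

        while ((pag = xfs_perag_next(mp, pag))) {
                error = do_something(pag);
                if (error) {
                        xfs_perag_rele(pag);    /* drop the ref we still hold */
                        break;
                }
        }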

View File

@ -212,6 +212,14 @@ enum xbtree_recpacking {
XBTREE_RECPACKING_FULL,
};
enum xfs_group_type {
XG_TYPE_AG,
XG_TYPE_MAX,
} __packed;
#define XG_TYPE_STRINGS \
{ XG_TYPE_AG, "ag" }
/*
* Type verifier functions
*/

View File

@ -208,7 +208,7 @@ xrep_agf_init_header(
memset(agf, 0, BBTOB(agf_bp->b_length));
agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
agf->agf_seqno = cpu_to_be32(pag->pag_agno);
agf->agf_seqno = cpu_to_be32(pag_agno(pag));
agf->agf_length = cpu_to_be32(pag->block_count);
agf->agf_flfirst = old_agf->agf_flfirst;
agf->agf_fllast = old_agf->agf_fllast;
@ -384,7 +384,7 @@ xrep_agf(
* was corrupt after xfs_alloc_read_agf failed with -EFSCORRUPTED.
*/
error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
XFS_AG_DADDR(mp, sc->sa.pag->pag_agno,
XFS_AG_DADDR(mp, pag_agno(sc->sa.pag),
XFS_AGF_DADDR(mp)),
XFS_FSS_TO_BB(mp, 1), 0, &agf_bp, NULL);
if (error)
@ -687,7 +687,7 @@ xrep_agfl_init_header(
agfl = XFS_BUF_TO_AGFL(agfl_bp);
memset(agfl, 0xFF, BBTOB(agfl_bp->b_length));
agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
agfl->agfl_seqno = cpu_to_be32(sc->sa.pag->pag_agno);
agfl->agfl_seqno = cpu_to_be32(pag_agno(sc->sa.pag));
uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
/*
@ -741,7 +741,7 @@ xrep_agfl(
* was corrupt after xfs_alloc_read_agfl failed with -EFSCORRUPTED.
*/
error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
XFS_AG_DADDR(mp, sc->sa.pag->pag_agno,
XFS_AG_DADDR(mp, pag_agno(sc->sa.pag),
XFS_AGFL_DADDR(mp)),
XFS_FSS_TO_BB(mp, 1), 0, &agfl_bp, NULL);
if (error)
@ -897,7 +897,7 @@ xrep_agi_init_header(
memset(agi, 0, BBTOB(agi_bp->b_length));
agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
agi->agi_seqno = cpu_to_be32(pag->pag_agno);
agi->agi_seqno = cpu_to_be32(pag_agno(pag));
agi->agi_length = cpu_to_be32(pag->block_count);
agi->agi_newino = cpu_to_be32(NULLAGINO);
agi->agi_dirino = cpu_to_be32(NULLAGINO);
@ -1112,9 +1112,9 @@ xrep_iunlink_igrab(
struct xfs_perag *pag,
struct xfs_inode *ip)
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag_agno(pag))
return false;
if (!xfs_inode_on_unlinked_list(ip))
@ -1138,7 +1138,7 @@ xrep_iunlink_visit(
unsigned int bucket;
int error;
ASSERT(XFS_INO_TO_AGNO(mp, ip->i_ino) == ragi->sc->sa.pag->pag_agno);
ASSERT(XFS_INO_TO_AGNO(mp, ip->i_ino) == pag_agno(ragi->sc->sa.pag));
ASSERT(xfs_inode_on_unlinked_list(ip));
agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
@ -1169,7 +1169,7 @@ xrep_iunlink_mark_incore(
struct xrep_agi *ragi)
{
struct xfs_perag *pag = ragi->sc->sa.pag;
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
uint32_t first_index = 0;
bool done = false;
unsigned int nr_found = 0;
@ -1209,7 +1209,7 @@ xrep_iunlink_mark_incore(
* us to see this inode, so another lookup from the
* same index will not find it again.
*/
if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag_agno(pag))
continue;
first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
@ -1761,7 +1761,7 @@ xrep_agi(
* was corrupt after xfs_ialloc_read_agi failed with -EFSCORRUPTED.
*/
error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
XFS_AG_DADDR(mp, sc->sa.pag->pag_agno,
XFS_AG_DADDR(mp, pag_agno(sc->sa.pag),
XFS_AGI_DADDR(mp)),
XFS_FSS_TO_BB(mp, 1), 0, &ragi->agi_bp, NULL);
if (error)

View File

@ -139,7 +139,7 @@ xchk_allocbt_rec(
struct xchk_alloc *ca = bs->private;
xfs_alloc_btrec_to_irec(rec, &irec);
if (xfs_alloc_check_irec(bs->cur->bc_ag.pag, &irec) != NULL) {
if (xfs_alloc_check_irec(to_perag(bs->cur->bc_group), &irec) != NULL) {
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
return 0;
}

View File

@ -132,17 +132,16 @@ int
xrep_setup_ag_allocbt(
struct xfs_scrub *sc)
{
struct xfs_group *xg = pag_group(sc->sa.pag);
unsigned int busy_gen;
/*
* Make sure the busy extent list is clear because we can't put extents
* on there twice.
*/
busy_gen = READ_ONCE(sc->sa.pag->pagb_gen);
if (xfs_extent_busy_list_empty(sc->sa.pag))
if (xfs_extent_busy_list_empty(xg, &busy_gen))
return 0;
return xfs_extent_busy_flush(sc->tp, sc->sa.pag, busy_gen, 0);
return xfs_extent_busy_flush(sc->tp, xg, busy_gen, 0);
}
/* Check for any obvious conflicts in the free extent. */
@ -543,7 +542,7 @@ xrep_abt_dispose_one(
/* Add a deferred rmap for each extent we used. */
if (resv->used > 0)
xfs_rmap_alloc_extent(sc->tp, pag->pag_agno, resv->agbno,
xfs_rmap_alloc_extent(sc->tp, pag_agno(pag), resv->agbno,
resv->used, XFS_RMAP_OWN_AG);
/*
@ -849,6 +848,7 @@ xrep_allocbt(
{
struct xrep_abt *ra;
struct xfs_mount *mp = sc->mp;
unsigned int busy_gen;
char *descr;
int error;
@ -869,7 +869,7 @@ xrep_allocbt(
* on there twice. In theory we cleared this before we started, but
* let's not risk the filesystem.
*/
if (!xfs_extent_busy_list_empty(sc->sa.pag)) {
if (!xfs_extent_busy_list_empty(pag_group(sc->sa.pag), &busy_gen)) {
error = -EDEADLOCK;
goto out_ra;
}

View File

@ -601,7 +601,8 @@ xchk_bmap_check_rmap(
xchk_fblock_set_corrupt(sc, sbcri->whichfork,
check_rec.rm_offset);
if (irec.br_startblock !=
xfs_agbno_to_fsb(cur->bc_ag.pag, check_rec.rm_startblock))
xfs_agbno_to_fsb(to_perag(cur->bc_group),
check_rec.rm_startblock))
xchk_fblock_set_corrupt(sc, sbcri->whichfork,
check_rec.rm_offset);
if (irec.br_blockcount > check_rec.rm_blockcount)
@ -760,11 +761,10 @@ xchk_bmap_check_rmaps(
struct xfs_scrub *sc,
int whichfork)
{
struct xfs_perag *pag;
xfs_agnumber_t agno;
struct xfs_perag *pag = NULL;
int error;
for_each_perag(sc->mp, agno, pag) {
while ((pag = xfs_perag_next(sc->mp, pag))) {
error = xchk_bmap_check_ag_rmaps(sc, whichfork, pag);
if (error ||
(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) {

View File

@ -196,7 +196,7 @@ xrep_bmap_check_fork_rmap(
return -EFSCORRUPTED;
/* Check that this is within the AG. */
if (!xfs_verify_agbext(cur->bc_ag.pag, rec->rm_startblock,
if (!xfs_verify_agbext(to_perag(cur->bc_group), rec->rm_startblock,
rec->rm_blockcount))
return -EFSCORRUPTED;
@ -268,7 +268,7 @@ xrep_bmap_walk_rmap(
if ((rec->rm_flags & XFS_RMAP_UNWRITTEN) && !rb->allow_unwritten)
return -EFSCORRUPTED;
fsbno = xfs_agbno_to_fsb(cur->bc_ag.pag, rec->rm_startblock);
fsbno = xfs_agbno_to_fsb(to_perag(cur->bc_group), rec->rm_startblock);
if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK) {
rb->old_bmbt_block_count += rec->rm_blockcount;
@ -407,12 +407,11 @@ xrep_bmap_find_mappings(
struct xrep_bmap *rb)
{
struct xfs_scrub *sc = rb->sc;
struct xfs_perag *pag;
xfs_agnumber_t agno;
struct xfs_perag *pag = NULL;
int error = 0;
/* Iterate the rmaps for extents. */
for_each_perag(sc->mp, agno, pag) {
while ((pag = xfs_perag_next(sc->mp, pag))) {
error = xrep_bmap_scan_ag(rb, pag);
if (error) {
xfs_perag_rele(pag);

View File

@ -513,7 +513,7 @@ xchk_perag_drain_and_lock(
* Obviously, this should be slanted against scrub and in favor
* of runtime threads.
*/
if (!xfs_perag_intent_busy(sa->pag))
if (!xfs_group_intent_busy(pag_group(sa->pag)))
return 0;
if (sa->agf_bp) {
@ -528,7 +528,7 @@ xchk_perag_drain_and_lock(
if (!(sc->flags & XCHK_FSGATES_DRAIN))
return -ECHRNG;
error = xfs_perag_intent_drain(sa->pag);
error = xfs_group_intent_drain(pag_group(sa->pag));
if (error == -ERESTARTSYS)
error = -EINTR;
} while (!error);

View File

@ -216,7 +216,8 @@ int xchk_metadata_inode_forks(struct xfs_scrub *sc);
#define xchk_xfile_ag_descr(sc, fmt, ...) \
kasprintf(XCHK_GFP_FLAGS, "XFS (%s): AG 0x%x " fmt, \
(sc)->mp->m_super->s_id, \
(sc)->sa.pag ? (sc)->sa.pag->pag_agno : (sc)->sm->sm_agno, \
(sc)->sa.pag ? \
pag_agno((sc)->sa.pag) : (sc)->sm->sm_agno, \
##__VA_ARGS__)
#define xchk_xfile_ino_descr(sc, fmt, ...) \
kasprintf(XCHK_GFP_FLAGS, "XFS (%s): inode 0x%llx " fmt, \

View File

@ -145,7 +145,8 @@ xrep_cow_mark_shared_staging(
xrep_cow_trim_refcount(xc, &rrec, rec);
return xrep_cow_mark_file_range(xc,
xfs_agbno_to_fsb(cur->bc_ag.pag, rrec.rc_startblock),
xfs_agbno_to_fsb(to_perag(cur->bc_group),
rrec.rc_startblock),
rrec.rc_blockcount);
}
@ -176,8 +177,9 @@ xrep_cow_mark_missing_staging(
if (xc->next_bno >= rrec.rc_startblock)
goto next;
error = xrep_cow_mark_file_range(xc,
xfs_agbno_to_fsb(cur->bc_ag.pag, xc->next_bno),
xfs_agbno_to_fsb(to_perag(cur->bc_group), xc->next_bno),
rrec.rc_startblock - xc->next_bno);
if (error)
return error;
@ -220,7 +222,8 @@ xrep_cow_mark_missing_staging_rmap(
}
return xrep_cow_mark_file_range(xc,
xfs_agbno_to_fsb(cur->bc_ag.pag, rec_bno), rec_len);
xfs_agbno_to_fsb(to_perag(cur->bc_group), rec_bno),
rec_len);
}
/*

View File

@ -74,10 +74,9 @@ xchk_fscount_warmup(
struct xfs_buf *agi_bp = NULL;
struct xfs_buf *agf_bp = NULL;
struct xfs_perag *pag = NULL;
xfs_agnumber_t agno;
int error = 0;
for_each_perag(mp, agno, pag) {
while ((pag = xfs_perag_next(mp, pag))) {
if (xchk_should_terminate(sc, &error))
break;
if (xfs_perag_initialised_agi(pag) &&
@ -295,9 +294,8 @@ xchk_fscount_aggregate_agcounts(
struct xchk_fscounters *fsc)
{
struct xfs_mount *mp = sc->mp;
struct xfs_perag *pag;
struct xfs_perag *pag = NULL;
uint64_t delayed;
xfs_agnumber_t agno;
int tries = 8;
int error = 0;
@ -306,7 +304,7 @@ retry:
fsc->ifree = 0;
fsc->fdblocks = 0;
for_each_perag(mp, agno, pag) {
while ((pag = xfs_perag_next(mp, pag))) {
if (xchk_should_terminate(sc, &error))
break;
@ -327,7 +325,7 @@ retry:
if (xfs_has_lazysbcount(sc->mp)) {
fsc->fdblocks += pag->pagf_btreeblks;
} else {
error = xchk_fscount_btreeblks(sc, fsc, agno);
error = xchk_fscount_btreeblks(sc, fsc, pag_agno(pag));
if (error)
break;
}

View File

@ -160,13 +160,12 @@ STATIC void
xchk_mark_all_healthy(
struct xfs_mount *mp)
{
struct xfs_perag *pag;
xfs_agnumber_t agno;
struct xfs_perag *pag = NULL;
xfs_fs_mark_healthy(mp, XFS_SICK_FS_INDIRECT);
xfs_rt_mark_healthy(mp, XFS_SICK_RT_INDIRECT);
for_each_perag(mp, agno, pag)
xfs_ag_mark_healthy(pag, XFS_SICK_AG_INDIRECT);
while ((pag = xfs_perag_next(mp, pag)))
xfs_group_mark_healthy(pag_group(pag), XFS_SICK_AG_INDIRECT);
}
/*
@ -207,9 +206,9 @@ xchk_update_health(
case XHG_AG:
pag = xfs_perag_get(sc->mp, sc->sm->sm_agno);
if (bad)
xfs_ag_mark_corrupt(pag, sc->sick_mask);
xfs_group_mark_corrupt(pag_group(pag), sc->sick_mask);
else
xfs_ag_mark_healthy(pag, sc->sick_mask);
xfs_group_mark_healthy(pag_group(pag), sc->sick_mask);
xfs_perag_put(pag);
break;
case XHG_INO:
@ -277,7 +276,7 @@ xchk_ag_btree_del_cursor_if_sick(
type_to_health_flag[sc->sm->sm_type].group == XHG_AG)
mask &= ~sc->sick_mask;
if (xfs_ag_has_sickness((*curp)->bc_ag.pag, mask)) {
if (xfs_group_has_sickness((*curp)->bc_group, mask)) {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
xfs_btree_del_cursor(*curp, XFS_BTREE_NOERROR);
*curp = NULL;
@ -294,9 +293,7 @@ xchk_health_record(
struct xfs_scrub *sc)
{
struct xfs_mount *mp = sc->mp;
struct xfs_perag *pag;
xfs_agnumber_t agno;
struct xfs_perag *pag = NULL;
unsigned int sick;
unsigned int checked;
@ -308,8 +305,8 @@ xchk_health_record(
if (sick & XFS_SICK_RT_PRIMARY)
xchk_set_corrupt(sc);
for_each_perag(mp, agno, pag) {
xfs_ag_measure_sickness(pag, &sick, &checked);
while ((pag = xfs_perag_next(mp, pag))) {
xfs_group_measure_sickness(pag_group(pag), &sick, &checked);
if (sick & XFS_SICK_AG_PRIMARY)
xchk_set_corrupt(sc);
}

View File

@ -258,7 +258,7 @@ xchk_iallocbt_chunk(
{
struct xfs_scrub *sc = bs->sc;
struct xfs_mount *mp = bs->cur->bc_mp;
struct xfs_perag *pag = bs->cur->bc_ag.pag;
struct xfs_perag *pag = to_perag(bs->cur->bc_group);
xfs_agblock_t agbno;
xfs_extlen_t len;
@ -318,7 +318,7 @@ xchk_iallocbt_check_cluster_ifree(
* the record, compute which fs inode we're talking about.
*/
agino = irec->ir_startino + irec_ino;
fsino = xfs_agino_to_ino(bs->cur->bc_ag.pag, agino);
fsino = xfs_agino_to_ino(to_perag(bs->cur->bc_group), agino);
irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino));
if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
@ -394,7 +394,7 @@ xchk_iallocbt_check_cluster(
* ir_startino can be large enough to make im_boffset nonzero.
*/
ir_holemask = (irec->ir_holemask & cluster_mask);
imap.im_blkno = xfs_agbno_to_daddr(bs->cur->bc_ag.pag, agbno);
imap.im_blkno = xfs_agbno_to_daddr(to_perag(bs->cur->bc_group), agbno);
imap.im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino) <<
mp->m_sb.sb_inodelog;
@ -405,9 +405,9 @@ xchk_iallocbt_check_cluster(
return 0;
}
trace_xchk_iallocbt_check_cluster(bs->cur->bc_ag.pag, irec->ir_startino,
imap.im_blkno, imap.im_len, cluster_base, nr_inodes,
cluster_mask, ir_holemask,
trace_xchk_iallocbt_check_cluster(to_perag(bs->cur->bc_group),
irec->ir_startino, imap.im_blkno, imap.im_len,
cluster_base, nr_inodes, cluster_mask, ir_holemask,
XFS_INO_TO_OFFSET(mp, irec->ir_startino +
cluster_base));
@ -583,7 +583,7 @@ xchk_iallocbt_rec(
uint16_t holemask;
xfs_inobt_btrec_to_irec(mp, rec, &irec);
if (xfs_inobt_check_irec(bs->cur->bc_ag.pag, &irec) != NULL) {
if (xfs_inobt_check_irec(to_perag(bs->cur->bc_group), &irec) != NULL) {
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
return 0;
}

View File

@ -814,7 +814,7 @@ xrep_iallocbt(
sc->sick_mask = XFS_SICK_AG_INOBT | XFS_SICK_AG_FINOBT;
/* Set up enough storage to handle an AG with nothing but inodes. */
xfs_agino_range(mp, sc->sa.pag->pag_agno, &first_agino, &last_agino);
xfs_agino_range(mp, pag_agno(sc->sa.pag), &first_agino, &last_agino);
last_agino /= XFS_INODES_PER_CHUNK;
descr = xchk_xfile_ag_descr(sc, "inode index records");
error = xfarray_create(descr, last_agino,

View File

@ -761,14 +761,13 @@ STATIC int
xrep_dinode_count_rmaps(
struct xrep_inode *ri)
{
struct xfs_perag *pag;
xfs_agnumber_t agno;
struct xfs_perag *pag = NULL;
int error;
if (!xfs_has_rmapbt(ri->sc->mp) || xfs_has_realtime(ri->sc->mp))
return -EOPNOTSUPP;
for_each_perag(ri->sc->mp, agno, pag) {
while ((pag = xfs_perag_next(ri->sc->mp, pag))) {
error = xrep_dinode_count_ag_rmaps(ri, pag);
if (error) {
xfs_perag_rele(pag);

View File

@ -67,7 +67,7 @@ xchk_iscan_mask_skipino(
xfs_agnumber_t skip_agno = XFS_INO_TO_AGNO(mp, iscan->skip_ino);
xfs_agnumber_t skip_agino = XFS_INO_TO_AGINO(mp, iscan->skip_ino);
if (pag->pag_agno != skip_agno)
if (pag_agno(pag) != skip_agno)
return;
if (skip_agino < rec->ir_startino)
return;
@ -95,7 +95,7 @@ xchk_iscan_find_next(
struct xfs_btree_cur *cur;
struct xfs_mount *mp = sc->mp;
struct xfs_trans *tp = sc->tp;
xfs_agnumber_t agno = pag->pag_agno;
xfs_agnumber_t agno = pag_agno(pag);
xfs_agino_t lastino = NULLAGINO;
xfs_agino_t first, last;
xfs_agino_t agino = *cursor;

View File

@ -58,7 +58,7 @@ xrep_newbt_estimate_slack(
if (sc->ops->type == ST_PERAG) {
free = sc->sa.pag->pagf_freeblks;
sz = xfs_ag_block_count(sc->mp, sc->sa.pag->pag_agno);
sz = xfs_ag_block_count(sc->mp, pag_agno(sc->sa.pag));
} else {
free = percpu_counter_sum(&sc->mp->m_fdblocks);
sz = sc->mp->m_sb.sb_dblocks;
@ -205,7 +205,7 @@ xrep_newbt_validate_ag_alloc_hint(
struct xfs_scrub *sc = xnr->sc;
xfs_agnumber_t agno = XFS_FSB_TO_AGNO(sc->mp, xnr->alloc_hint);
if (agno == sc->sa.pag->pag_agno &&
if (agno == pag_agno(sc->sa.pag) &&
xfs_verify_fsbno(sc->mp, xnr->alloc_hint))
return;
@ -250,8 +250,8 @@ xrep_newbt_alloc_ag_blocks(
return -ENOSPC;
agno = XFS_FSB_TO_AGNO(mp, args.fsbno);
if (agno != sc->sa.pag->pag_agno) {
ASSERT(agno == sc->sa.pag->pag_agno);
if (agno != pag_agno(sc->sa.pag)) {
ASSERT(agno == pag_agno(sc->sa.pag));
return -EFSCORRUPTED;
}

View File

@ -137,7 +137,7 @@ xreap_put_freelist(
agfl_bp, agbno, 0);
if (error)
return error;
xfs_extent_busy_insert(sc->tp, sc->sa.pag, agbno, 1,
xfs_extent_busy_insert(sc->tp, pag_group(sc->sa.pag), agbno, 1,
XFS_EXTENT_BUSY_SKIP_DISCARD);
return 0;

View File

@ -453,7 +453,8 @@ xchk_refcountbt_rec(
struct xchk_refcbt_records *rrc = bs->private;
xfs_refcount_btrec_to_irec(rec, &irec);
if (xfs_refcount_check_irec(bs->cur->bc_ag.pag, &irec) != NULL) {
if (xfs_refcount_check_irec(to_perag(bs->cur->bc_group), &irec) !=
NULL) {
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
return 0;
}

View File

@ -413,7 +413,7 @@ xrep_fix_freelist(
args.mp = sc->mp;
args.tp = sc->tp;
args.agno = sc->sa.pag->pag_agno;
args.agno = pag_agno(sc->sa.pag);
args.alignment = 1;
args.pag = sc->sa.pag;
@ -972,7 +972,7 @@ xrep_reset_perag_resv(
if (error == -ENOSPC) {
xfs_err(sc->mp,
"Insufficient free space to reset per-AG reservation for AG %u after repair.",
sc->sa.pag->pag_agno);
pag_agno(sc->sa.pag));
error = 0;
}

View File

@ -358,7 +358,7 @@ xchk_rmapbt_rec(
struct xfs_rmap_irec irec;
if (xfs_rmap_btrec_to_irec(rec, &irec) != NULL ||
xfs_rmap_check_irec(bs->cur->bc_ag.pag, &irec) != NULL) {
xfs_rmap_check_irec(to_perag(bs->cur->bc_group), &irec) != NULL) {
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
return 0;
}
@ -410,7 +410,7 @@ xchk_rmapbt_walk_ag_metadata(
goto out;
/* OWN_LOG: Internal log */
if (xfs_ag_contains_log(mp, sc->sa.pag->pag_agno)) {
if (xfs_ag_contains_log(mp, pag_agno(sc->sa.pag))) {
error = xagb_bitmap_set(&cr->log_owned,
XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart),
mp->m_sb.sb_logblocks);

View File

@ -344,7 +344,7 @@ xrep_rmap_visit_bmbt(
int error;
if (XFS_FSB_TO_AGNO(mp, rec->br_startblock) !=
rf->rr->sc->sa.pag->pag_agno)
pag_agno(rf->rr->sc->sa.pag))
return 0;
agbno = XFS_FSB_TO_AGBNO(mp, rec->br_startblock);
@ -391,7 +391,7 @@ xrep_rmap_visit_iroot_btree_block(
return 0;
fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
if (XFS_FSB_TO_AGNO(cur->bc_mp, fsbno) != rf->rr->sc->sa.pag->pag_agno)
if (XFS_FSB_TO_AGNO(cur->bc_mp, fsbno) != pag_agno(rf->rr->sc->sa.pag))
return 0;
agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
@ -622,7 +622,7 @@ xrep_rmap_walk_inobt(
return error;
xfs_inobt_btrec_to_irec(mp, rec, &irec);
if (xfs_inobt_check_irec(cur->bc_ag.pag, &irec) != NULL)
if (xfs_inobt_check_irec(to_perag(cur->bc_group), &irec) != NULL)
return -EFSCORRUPTED;
agino = irec.ir_startino;
@ -801,7 +801,7 @@ xrep_rmap_find_log_rmaps(
{
struct xfs_scrub *sc = rr->sc;
if (!xfs_ag_contains_log(sc->mp, sc->sa.pag->pag_agno))
if (!xfs_ag_contains_log(sc->mp, pag_agno(sc->sa.pag)))
return 0;
return xrep_rmap_stash(rr,
@ -976,7 +976,7 @@ xrep_rmap_try_reserve(
{
struct xrep_rmap_agfl ra = {
.bitmap = freesp_blocks,
.agno = rr->sc->sa.pag->pag_agno,
.agno = pag_agno(rr->sc->sa.pag),
};
struct xfs_scrub *sc = rr->sc;
struct xrep_newbt_resv *resv, *n;
@ -1596,7 +1596,7 @@ xrep_rmap_setup_scan(
/* Set up in-memory rmap btree */
error = xfs_rmapbt_mem_init(sc->mp, &rr->rmap_btree, sc->xmbtp,
sc->sa.pag->pag_agno);
pag_agno(sc->sa.pag));
if (error)
goto out_mutex;
@ -1611,7 +1611,7 @@ xrep_rmap_setup_scan(
*/
ASSERT(sc->flags & XCHK_FSGATES_RMAP);
xfs_rmap_hook_setup(&rr->rhook, xrep_rmapbt_live_update);
error = xfs_rmap_hook_add(sc->sa.pag, &rr->rhook);
error = xfs_rmap_hook_add(pag_group(sc->sa.pag), &rr->rhook);
if (error)
goto out_iscan;
return 0;
@ -1632,7 +1632,7 @@ xrep_rmap_teardown(
struct xfs_scrub *sc = rr->sc;
xchk_iscan_abort(&rr->iscan);
xfs_rmap_hook_del(sc->sa.pag, &rr->rhook);
xfs_rmap_hook_del(pag_group(sc->sa.pag), &rr->rhook);
xchk_iscan_teardown(&rr->iscan);
xfbtree_destroy(&rr->rmap_btree);
mutex_destroy(&rr->lock);

View File

@ -792,8 +792,8 @@ TRACE_EVENT(xchk_iallocbt_check_cluster,
__field(uint16_t, holemask)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->startino = startino;
__entry->map_daddr = map_daddr;
__entry->map_len = map_len;
@ -936,8 +936,8 @@ TRACE_EVENT(xchk_refcount_incorrect,
__field(xfs_nlink_t, seen)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->domain = irec->rc_domain;
__entry->startblock = irec->rc_startblock;
__entry->blockcount = irec->rc_blockcount;
@ -1929,8 +1929,8 @@ DECLARE_EVENT_CLASS(xrep_extent_class,
__field(xfs_extlen_t, len)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->agbno = agbno;
__entry->len = len;
),
@ -1963,8 +1963,8 @@ DECLARE_EVENT_CLASS(xrep_reap_find_class,
__field(bool, crosslinked)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->agbno = agbno;
__entry->len = len;
__entry->crosslinked = crosslinked;
@ -1997,8 +1997,8 @@ TRACE_EVENT(xrep_ibt_walk_rmap,
__field(unsigned int, flags)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->agbno = rec->rm_startblock;
__entry->len = rec->rm_blockcount;
__entry->owner = rec->rm_owner;
@ -2026,8 +2026,8 @@ TRACE_EVENT(xrep_abt_found,
__field(xfs_extlen_t, blockcount)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->startblock = rec->ar_startblock;
__entry->blockcount = rec->ar_blockcount;
),
@ -2052,8 +2052,8 @@ TRACE_EVENT(xrep_ibt_found,
__field(uint64_t, freemask)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->startino = rec->ir_startino;
__entry->holemask = rec->ir_holemask;
__entry->count = rec->ir_count;
@ -2083,8 +2083,8 @@ TRACE_EVENT(xrep_refc_found,
__field(xfs_nlink_t, refcount)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->domain = rec->rc_domain;
__entry->startblock = rec->rc_startblock;
__entry->blockcount = rec->rc_blockcount;
@ -2144,8 +2144,8 @@ TRACE_EVENT(xrep_rmap_found,
__field(unsigned int, flags)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->agbno = rec->rm_startblock;
__entry->len = rec->rm_blockcount;
__entry->owner = rec->rm_owner;
@ -2174,8 +2174,8 @@ TRACE_EVENT(xrep_findroot_block,
__field(uint16_t, level)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->agbno = agbno;
__entry->magic = magic;
__entry->level = level;
@ -2201,8 +2201,8 @@ TRACE_EVENT(xrep_calc_ag_resblks,
__field(xfs_agblock_t, usedlen)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->icount = icount;
__entry->aglen = aglen;
__entry->freelen = freelen;
@ -2230,8 +2230,8 @@ TRACE_EVENT(xrep_calc_ag_resblks_btsize,
__field(xfs_agblock_t, refcbt_sz)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->bnobt_sz = bnobt_sz;
__entry->inobt_sz = inobt_sz;
__entry->rmapbt_sz = rmapbt_sz;
@ -2282,8 +2282,8 @@ DECLARE_EVENT_CLASS(xrep_newbt_extent_class,
__field(int64_t, owner)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->agbno = agbno;
__entry->len = len;
__entry->owner = owner;
@ -2597,8 +2597,8 @@ TRACE_EVENT(xrep_cow_free_staging,
__field(xfs_extlen_t, blockcount)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->agbno = agbno;
__entry->blockcount = blockcount;
),
@ -2657,8 +2657,8 @@ TRACE_EVENT(xrep_rmap_live_update,
__field(unsigned int, flags)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->op = op;
__entry->agbno = p->startblock;
__entry->len = p->blockcount;
@ -3317,9 +3317,9 @@ TRACE_EVENT(xrep_iunlink_visit,
__field(xfs_agino_t, next_agino)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->agino = XFS_INO_TO_AGINO(pag->pag_mount, ip->i_ino);
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->agino = XFS_INO_TO_AGINO(pag_mount(pag), ip->i_ino);
__entry->bucket = bucket;
__entry->bucket_agino = bucket_agino;
__entry->prev_agino = ip->i_prev_unlinked;
@ -3405,8 +3405,8 @@ TRACE_EVENT(xrep_iunlink_walk_ondisk_bucket,
__field(xfs_agino_t, next_agino)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->bucket = bucket;
__entry->prev_agino = prev_agino;
__entry->next_agino = next_agino;
@ -3431,8 +3431,8 @@ DECLARE_EVENT_CLASS(xrep_iunlink_resolve_class,
__field(xfs_agino_t, next_agino)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->bucket = bucket;
__entry->prev_agino = prev_agino;
__entry->next_agino = next_agino;
@ -3518,8 +3518,8 @@ TRACE_EVENT(xrep_iunlink_add_to_bucket,
__field(xfs_agino_t, next_agino)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->bucket = bucket;
__entry->agino = agino;
__entry->next_agino = curr_head;
@ -3544,8 +3544,8 @@ TRACE_EVENT(xrep_iunlink_commit_bucket,
__field(xfs_agino_t, agino)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->bucket = bucket;
__entry->old_agino = old_agino;
__entry->agino = agino;

View File

@ -334,7 +334,8 @@ xfs_bmap_update_get_group(
* intent drops the intent count, ensuring that the intent count
* remains nonzero across the transaction roll.
*/
bi->bi_pag = xfs_perag_intent_get(mp, bi->bi_bmap.br_startblock);
bi->bi_group = xfs_group_intent_get(mp, bi->bi_bmap.br_startblock,
XG_TYPE_AG);
}
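The comment above states the key invariant: when an intent is relogged, the replacement takes its group reference before the original drops it, so the drain count never dips to zero across the roll. Schematically (a sketch with hypothetical old_bi/new_bi/fsbno names, not code from this patch):

        /* requeue: hold for the new intent first ... */
        new_bi->bi_group = xfs_group_intent_get(mp, fsbno, XG_TYPE_AG);
        /* ... then release the old intent's hold; the count stays nonzero */
        xfs_group_intent_put(old_bi->bi_group);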
/* Add this deferred BUI to the transaction. */
@ -368,7 +369,7 @@ xfs_bmap_update_put_group(
if (xfs_ifork_is_realtime(bi->bi_owner, bi->bi_whichfork))
return;
xfs_perag_intent_put(bi->bi_pag);
xfs_group_intent_put(bi->bi_group);
}
/* Cancel a deferred bmap update. */

View File

@ -117,10 +117,12 @@ xfs_discard_extents(
blk_start_plug(&plug);
list_for_each_entry(busyp, &extents->extent_list, list) {
trace_xfs_discard_extent(busyp->pag, busyp->bno, busyp->length);
struct xfs_perag *pag = to_perag(busyp->group);
trace_xfs_discard_extent(pag, busyp->bno, busyp->length);
error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
xfs_agbno_to_daddr(busyp->pag, busyp->bno),
xfs_agbno_to_daddr(pag, busyp->bno),
XFS_FSB_TO_BB(mp, busyp->length),
GFP_KERNEL, &bio);
if (error && error != -EOPNOTSUPP) {
@ -159,7 +161,7 @@ xfs_trim_gather_extents(
struct xfs_trim_cur *tcur,
struct xfs_busy_extents *extents)
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
struct xfs_trans *tp;
struct xfs_btree_cur *cur;
struct xfs_buf *agbp;
@ -271,12 +273,12 @@ xfs_trim_gather_extents(
* If any blocks in the range are still busy, skip the
* discard and try again the next time.
*/
if (xfs_extent_busy_search(pag, fbno, flen)) {
if (xfs_extent_busy_search(pag_group(pag), fbno, flen)) {
trace_xfs_discard_busy(pag, fbno, flen);
goto next_extent;
}
xfs_extent_busy_insert_discard(pag, fbno, flen,
xfs_extent_busy_insert_discard(pag_group(pag), fbno, flen,
&extents->extent_list);
next_extent:
if (tcur->by_bno)
@ -365,7 +367,7 @@ xfs_trim_perag_extents(
* list after this function call, as it may have been freed by
* the time control returns to us.
*/
error = xfs_discard_extents(pag->pag_mount, extents);
error = xfs_discard_extents(pag_mount(pag), extents);
if (error)
break;
@ -387,8 +389,8 @@ xfs_trim_datadev_extents(
{
xfs_agnumber_t start_agno, end_agno;
xfs_agblock_t start_agbno, end_agbno;
struct xfs_perag *pag = NULL;
xfs_daddr_t ddev_end;
struct xfs_perag *pag;
int last_error = 0, error;
ddev_end = min_t(xfs_daddr_t, end,
@ -399,10 +401,10 @@ xfs_trim_datadev_extents(
end_agno = xfs_daddr_to_agno(mp, ddev_end);
end_agbno = xfs_daddr_to_agbno(mp, ddev_end);
for_each_perag_range(mp, start_agno, end_agno, pag) {
while ((pag = xfs_perag_next_range(mp, pag, start_agno, end_agno))) {
xfs_agblock_t agend = pag->block_count;
if (start_agno == end_agno)
if (pag_agno(pag) == end_agno)
agend = end_agbno;
error = xfs_trim_perag_extents(pag, start_agbno, agend, minlen);
if (error)

View File

@ -94,55 +94,39 @@ static inline int xfs_defer_drain_wait(struct xfs_defer_drain *dr)
}
/*
* Get a passive reference to the AG that contains a fsbno and declare an intent
* to update its metadata.
* Get a passive reference to the group that contains a fsbno and declare an
* intent to update its metadata.
*
* Other threads that need exclusive access can decide to back off if they see
* declared intentions.
*/
struct xfs_perag *
xfs_perag_intent_get(
struct xfs_group *
xfs_group_intent_get(
struct xfs_mount *mp,
xfs_fsblock_t fsbno)
xfs_fsblock_t fsbno,
enum xfs_group_type type)
{
struct xfs_perag *pag;
struct xfs_group *xg;
pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, fsbno));
if (!pag)
xg = xfs_group_get_by_fsb(mp, fsbno, type);
if (!xg)
return NULL;
xfs_perag_intent_hold(pag);
return pag;
trace_xfs_group_intent_hold(xg, __return_address);
xfs_defer_drain_grab(&xg->xg_intents_drain);
return xg;
}
/*
* Release our intent to update this AG's metadata, and then release our
* passive ref to the AG.
* Release our intent to update this group's metadata, and then release our
* passive ref to it.
*/
void
xfs_perag_intent_put(
struct xfs_perag *pag)
xfs_group_intent_put(
struct xfs_group *xg)
{
xfs_perag_intent_rele(pag);
xfs_perag_put(pag);
}
/*
* Declare an intent to update AG metadata. Other threads that need exclusive
* access can decide to back off if they see declared intentions.
*/
void
xfs_perag_intent_hold(
struct xfs_perag *pag)
{
trace_xfs_perag_intent_hold(pag, __return_address);
xfs_defer_drain_grab(&pag->pag_intents_drain);
}
/* Release our intent to update this AG's metadata. */
void
xfs_perag_intent_rele(
struct xfs_perag *pag)
{
trace_xfs_perag_intent_rele(pag, __return_address);
xfs_defer_drain_rele(&pag->pag_intents_drain);
trace_xfs_group_intent_rele(xg, __return_address);
xfs_defer_drain_rele(&xg->xg_intents_drain);
xfs_group_put(xg);
}
/*
@ -150,17 +134,19 @@ xfs_perag_intent_rele(
* Callers must not hold any AG header buffers.
*/
int
xfs_perag_intent_drain(
struct xfs_perag *pag)
xfs_group_intent_drain(
struct xfs_group *xg)
{
trace_xfs_perag_wait_intents(pag, __return_address);
return xfs_defer_drain_wait(&pag->pag_intents_drain);
trace_xfs_group_wait_intents(xg, __return_address);
return xfs_defer_drain_wait(&xg->xg_intents_drain);
}
/* Has anyone declared an intent to update this AG? */
/*
* Has anyone declared an intent to update this group?
*/
bool
xfs_perag_intent_busy(
struct xfs_perag *pag)
xfs_group_intent_busy(
struct xfs_group *xg)
{
return xfs_defer_drain_busy(&pag->pag_intents_drain);
return xfs_defer_drain_busy(&xg->xg_intents_drain);
}
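For the consumer side, compare the scrub/common.c hunk earlier in this diff; condensed, the back-off loop looks like this (sa is scrub state there):

        if (!xfs_group_intent_busy(pag_group(sa->pag)))
                return 0;       /* no intents pending, safe to proceed */

        error = xfs_group_intent_drain(pag_group(sa->pag));
        if (error == -ERESTARTSYS)
                error = -EINTR; /* scrub reports signals as -EINTR */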

View File

@ -6,6 +6,7 @@
#ifndef XFS_DRAIN_H_
#define XFS_DRAIN_H_
struct xfs_group;
struct xfs_perag;
#ifdef CONFIG_XFS_DRAIN_INTENTS
@ -61,27 +62,22 @@ void xfs_drain_wait_enable(void);
* soon as the item is added to the transaction and cannot drop the counter
* until the item is finished or cancelled.
*/
struct xfs_perag *xfs_perag_intent_get(struct xfs_mount *mp,
xfs_fsblock_t fsbno);
void xfs_perag_intent_put(struct xfs_perag *pag);
struct xfs_group *xfs_group_intent_get(struct xfs_mount *mp,
xfs_fsblock_t fsbno, enum xfs_group_type type);
void xfs_group_intent_put(struct xfs_group *xg);
void xfs_perag_intent_hold(struct xfs_perag *pag);
void xfs_perag_intent_rele(struct xfs_perag *pag);
int xfs_group_intent_drain(struct xfs_group *xg);
bool xfs_group_intent_busy(struct xfs_group *xg);
int xfs_perag_intent_drain(struct xfs_perag *pag);
bool xfs_perag_intent_busy(struct xfs_perag *pag);
#else
struct xfs_defer_drain { /* empty */ };
#define xfs_defer_drain_free(dr) ((void)0)
#define xfs_defer_drain_init(dr) ((void)0)
#define xfs_perag_intent_get(mp, fsbno) \
xfs_perag_get((mp), XFS_FSB_TO_AGNO(mp, fsbno))
#define xfs_perag_intent_put(pag) xfs_perag_put(pag)
static inline void xfs_perag_intent_hold(struct xfs_perag *pag) { }
static inline void xfs_perag_intent_rele(struct xfs_perag *pag) { }
#define xfs_group_intent_get(_mp, _fsbno, _type) \
xfs_group_get_by_fsb((_mp), (_fsbno), (_type))
#define xfs_group_intent_put(xg) xfs_group_put(xg)
#endif /* CONFIG_XFS_DRAIN_INTENTS */

View File

@ -19,14 +19,22 @@
#include "xfs_log.h"
#include "xfs_ag.h"
struct xfs_extent_busy_tree {
spinlock_t eb_lock;
struct rb_root eb_tree;
unsigned int eb_gen;
wait_queue_head_t eb_wait;
};
static void
xfs_extent_busy_insert_list(
struct xfs_perag *pag,
struct xfs_group *xg,
xfs_agblock_t bno,
xfs_extlen_t len,
unsigned int flags,
struct list_head *busy_list)
{
struct xfs_extent_busy_tree *eb = xg->xg_busy_extents;
struct xfs_extent_busy *new;
struct xfs_extent_busy *busyp;
struct rb_node **rbp;
@ -34,17 +42,17 @@ xfs_extent_busy_insert_list(
new = kzalloc(sizeof(struct xfs_extent_busy),
GFP_KERNEL | __GFP_NOFAIL);
new->pag = xfs_perag_hold(pag);
new->group = xfs_group_hold(xg);
new->bno = bno;
new->length = len;
INIT_LIST_HEAD(&new->list);
new->flags = flags;
/* trace before insert to be able to see failed inserts */
trace_xfs_extent_busy(pag, bno, len);
trace_xfs_extent_busy(xg, bno, len);
spin_lock(&pag->pagb_lock);
rbp = &pag->pagb_tree.rb_node;
spin_lock(&eb->eb_lock);
rbp = &eb->eb_tree.rb_node;
while (*rbp) {
parent = *rbp;
busyp = rb_entry(parent, struct xfs_extent_busy, rb_node);
@ -61,32 +69,32 @@ xfs_extent_busy_insert_list(
}
rb_link_node(&new->rb_node, parent, rbp);
rb_insert_color(&new->rb_node, &pag->pagb_tree);
rb_insert_color(&new->rb_node, &eb->eb_tree);
/* always process discard lists in fifo order */
list_add_tail(&new->list, busy_list);
spin_unlock(&pag->pagb_lock);
spin_unlock(&eb->eb_lock);
}
void
xfs_extent_busy_insert(
struct xfs_trans *tp,
struct xfs_perag *pag,
struct xfs_group *xg,
xfs_agblock_t bno,
xfs_extlen_t len,
unsigned int flags)
{
xfs_extent_busy_insert_list(pag, bno, len, flags, &tp->t_busy);
xfs_extent_busy_insert_list(xg, bno, len, flags, &tp->t_busy);
}
void
xfs_extent_busy_insert_discard(
struct xfs_perag *pag,
struct xfs_group *xg,
xfs_agblock_t bno,
xfs_extlen_t len,
struct list_head *busy_list)
{
xfs_extent_busy_insert_list(pag, bno, len, XFS_EXTENT_BUSY_DISCARDED,
xfs_extent_busy_insert_list(xg, bno, len, XFS_EXTENT_BUSY_DISCARDED,
busy_list);
}
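The caller's view, taken from the reap hunk earlier in this diff: blocks freed back to the AGFL are parked on the group's busy tree until the freeing transaction commits:

        xfs_extent_busy_insert(sc->tp, pag_group(sc->sa.pag), agbno, 1,
                        XFS_EXTENT_BUSY_SKIP_DISCARD);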
@ -101,17 +109,18 @@ xfs_extent_busy_insert_discard(
*/
int
xfs_extent_busy_search(
struct xfs_perag *pag,
struct xfs_group *xg,
xfs_agblock_t bno,
xfs_extlen_t len)
{
struct xfs_extent_busy_tree *eb = xg->xg_busy_extents;
struct rb_node *rbp;
struct xfs_extent_busy *busyp;
int match = 0;
/* find closest start bno overlap */
spin_lock(&pag->pagb_lock);
rbp = pag->pagb_tree.rb_node;
spin_lock(&eb->eb_lock);
rbp = eb->eb_tree.rb_node;
while (rbp) {
busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
if (bno < busyp->bno) {
@ -130,7 +139,7 @@ xfs_extent_busy_search(
break;
}
}
spin_unlock(&pag->pagb_lock);
spin_unlock(&eb->eb_lock);
return match;
}
@ -147,13 +156,15 @@ xfs_extent_busy_search(
*/
STATIC bool
xfs_extent_busy_update_extent(
struct xfs_perag *pag,
struct xfs_group *xg,
struct xfs_extent_busy *busyp,
xfs_agblock_t fbno,
xfs_extlen_t flen,
bool userdata) __releases(&pag->pagb_lock)
__acquires(&pag->pagb_lock)
bool userdata)
__releases(&eb->eb_lock)
__acquires(&eb->eb_lock)
{
struct xfs_extent_busy_tree *eb = xg->xg_busy_extents;
xfs_agblock_t fend = fbno + flen;
xfs_agblock_t bbno = busyp->bno;
xfs_agblock_t bend = bbno + busyp->length;
@ -164,9 +175,9 @@ xfs_extent_busy_update_extent(
* and retry.
*/
if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) {
spin_unlock(&pag->pagb_lock);
spin_unlock(&eb->eb_lock);
delay(1);
spin_lock(&pag->pagb_lock);
spin_lock(&eb->eb_lock);
return false;
}
@ -239,7 +250,7 @@ xfs_extent_busy_update_extent(
* tree root, because erasing the node can rearrange the
* tree topology.
*/
rb_erase(&busyp->rb_node, &pag->pagb_tree);
rb_erase(&busyp->rb_node, &eb->eb_tree);
busyp->length = 0;
return false;
} else if (fend < bend) {
@ -278,14 +289,14 @@ xfs_extent_busy_update_extent(
ASSERT(0);
}
trace_xfs_extent_busy_reuse(pag, fbno, flen);
trace_xfs_extent_busy_reuse(xg, fbno, flen);
return true;
out_force_log:
spin_unlock(&pag->pagb_lock);
xfs_log_force(pag->pag_mount, XFS_LOG_SYNC);
trace_xfs_extent_busy_force(pag, fbno, flen);
spin_lock(&pag->pagb_lock);
spin_unlock(&eb->eb_lock);
xfs_log_force(xg->xg_mount, XFS_LOG_SYNC);
trace_xfs_extent_busy_force(xg, fbno, flen);
spin_lock(&eb->eb_lock);
return false;
}
@ -294,17 +305,18 @@ out_force_log:
*/
void
xfs_extent_busy_reuse(
struct xfs_perag *pag,
struct xfs_group *xg,
xfs_agblock_t fbno,
xfs_extlen_t flen,
bool userdata)
{
struct xfs_extent_busy_tree *eb = xg->xg_busy_extents;
struct rb_node *rbp;
ASSERT(flen > 0);
spin_lock(&pag->pagb_lock);
spin_lock(&eb->eb_lock);
restart:
rbp = pag->pagb_tree.rb_node;
rbp = eb->eb_tree.rb_node;
while (rbp) {
struct xfs_extent_busy *busyp =
rb_entry(rbp, struct xfs_extent_busy, rb_node);
@ -319,11 +331,11 @@ restart:
continue;
}
if (!xfs_extent_busy_update_extent(pag, busyp, fbno, flen,
if (!xfs_extent_busy_update_extent(xg, busyp, fbno, flen,
userdata))
goto restart;
}
spin_unlock(&pag->pagb_lock);
spin_unlock(&eb->eb_lock);
}
/*
@ -332,7 +344,7 @@ restart:
* args->minlen no suitable extent could be found, and the higher level
* code needs to force out the log and retry the allocation.
*
* Return the current busy generation for the AG if the extent is busy. This
* Return the current busy generation for the group if the extent is busy. This
* value can be used to wait for at least one of the currently busy extents
* to be cleared. Note that the busy list is not guaranteed to be empty after
* the gen is woken. The state of a specific extent must always be confirmed
@ -340,11 +352,14 @@ restart:
*/
bool
xfs_extent_busy_trim(
struct xfs_alloc_arg *args,
struct xfs_group *xg,
xfs_extlen_t minlen,
xfs_extlen_t maxlen,
xfs_agblock_t *bno,
xfs_extlen_t *len,
unsigned *busy_gen)
{
struct xfs_extent_busy_tree *eb = xg->xg_busy_extents;
xfs_agblock_t fbno;
xfs_extlen_t flen;
struct rb_node *rbp;
@ -352,11 +367,11 @@ xfs_extent_busy_trim(
ASSERT(*len > 0);
spin_lock(&args->pag->pagb_lock);
spin_lock(&eb->eb_lock);
fbno = *bno;
flen = *len;
rbp = args->pag->pagb_tree.rb_node;
while (rbp && flen >= args->minlen) {
rbp = eb->eb_tree.rb_node;
while (rbp && flen >= minlen) {
struct xfs_extent_busy *busyp =
rb_entry(rbp, struct xfs_extent_busy, rb_node);
xfs_agblock_t fend = fbno + flen;
@ -477,13 +492,13 @@ xfs_extent_busy_trim(
* good chance subsequent allocations will be
* contiguous.
*/
if (bbno - fbno >= args->maxlen) {
if (bbno - fbno >= maxlen) {
/* left candidate fits perfect */
fend = bbno;
} else if (fend - bend >= args->maxlen * 4) {
} else if (fend - bend >= maxlen * 4) {
/* right candidate has enough free space */
fbno = bend;
} else if (bbno - fbno >= args->minlen) {
} else if (bbno - fbno >= minlen) {
/* left candidate fits minimum requirement */
fend = bbno;
} else {
@ -496,13 +511,13 @@ xfs_extent_busy_trim(
out:
if (fbno != *bno || flen != *len) {
trace_xfs_extent_busy_trim(args->pag, *bno, *len, fbno, flen);
trace_xfs_extent_busy_trim(xg, *bno, *len, fbno, flen);
*bno = fbno;
*len = flen;
*busy_gen = args->pag->pagb_gen;
*busy_gen = eb->eb_gen;
ret = true;
}
spin_unlock(&args->pag->pagb_lock);
spin_unlock(&eb->eb_lock);
return ret;
fail:
/*
@ -515,22 +530,24 @@ fail:
static bool
xfs_extent_busy_clear_one(
struct xfs_perag *pag,
struct xfs_extent_busy *busyp,
bool do_discard)
{
struct xfs_extent_busy_tree *eb = busyp->group->xg_busy_extents;
if (busyp->length) {
if (do_discard &&
!(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD)) {
busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
return false;
}
trace_xfs_extent_busy_clear(pag, busyp->bno, busyp->length);
rb_erase(&busyp->rb_node, &pag->pagb_tree);
trace_xfs_extent_busy_clear(busyp->group, busyp->bno,
busyp->length);
rb_erase(&busyp->rb_node, &eb->eb_tree);
}
list_del_init(&busyp->list);
xfs_perag_put(busyp->pag);
xfs_group_put(busyp->group);
kfree(busyp);
return true;
}
@ -552,29 +569,30 @@ xfs_extent_busy_clear(
return;
do {
struct xfs_perag *pag = xfs_perag_hold(busyp->pag);
struct xfs_group *xg = xfs_group_hold(busyp->group);
struct xfs_extent_busy_tree *eb = xg->xg_busy_extents;
bool wakeup = false;
spin_lock(&pag->pagb_lock);
spin_lock(&eb->eb_lock);
do {
next = list_next_entry(busyp, list);
if (xfs_extent_busy_clear_one(pag, busyp, do_discard))
if (xfs_extent_busy_clear_one(busyp, do_discard))
wakeup = true;
busyp = next;
} while (!list_entry_is_head(busyp, list, list) &&
busyp->pag == pag);
busyp->group == xg);
if (wakeup) {
pag->pagb_gen++;
wake_up_all(&pag->pagb_wait);
eb->eb_gen++;
wake_up_all(&eb->eb_wait);
}
spin_unlock(&pag->pagb_lock);
xfs_perag_put(pag);
spin_unlock(&eb->eb_lock);
xfs_group_put(xg);
} while (!list_entry_is_head(busyp, list, list));
}
/*
* Flush out all busy extents for this AG.
* Flush out all busy extents for this group.
*
* If the current transaction is holding busy extents, the caller may not want
* to wait for committed busy extents to resolve. If we are being told just to
@ -590,10 +608,11 @@ xfs_extent_busy_clear(
int
xfs_extent_busy_flush(
struct xfs_trans *tp,
struct xfs_perag *pag,
struct xfs_group *xg,
unsigned busy_gen,
uint32_t alloc_flags)
{
struct xfs_extent_busy_tree *eb = xg->xg_busy_extents;
DEFINE_WAIT (wait);
int error;
@ -606,7 +625,7 @@ xfs_extent_busy_flush(
if (alloc_flags & XFS_ALLOC_FLAG_TRYFLUSH)
return 0;
if (busy_gen != READ_ONCE(pag->pagb_gen))
if (busy_gen != READ_ONCE(eb->eb_gen))
return 0;
if (alloc_flags & XFS_ALLOC_FLAG_FREEING)
@ -615,37 +634,44 @@ xfs_extent_busy_flush(
/* Wait for committed busy extents to resolve. */
do {
prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
if (busy_gen != READ_ONCE(pag->pagb_gen))
prepare_to_wait(&eb->eb_wait, &wait, TASK_KILLABLE);
if (busy_gen != READ_ONCE(eb->eb_gen))
break;
schedule();
} while (1);
finish_wait(&pag->pagb_wait, &wait);
finish_wait(&eb->eb_wait, &wait);
return 0;
}
static void
xfs_extent_busy_wait_group(
struct xfs_group *xg)
{
DEFINE_WAIT (wait);
struct xfs_extent_busy_tree *eb = xg->xg_busy_extents;
do {
prepare_to_wait(&eb->eb_wait, &wait, TASK_KILLABLE);
if (RB_EMPTY_ROOT(&eb->eb_tree))
break;
schedule();
} while (1);
finish_wait(&eb->eb_wait, &wait);
}
void
xfs_extent_busy_wait_all(
struct xfs_mount *mp)
{
struct xfs_perag *pag;
DEFINE_WAIT (wait);
xfs_agnumber_t agno;
struct xfs_perag *pag = NULL;
for_each_perag(mp, agno, pag) {
do {
prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
if (RB_EMPTY_ROOT(&pag->pagb_tree))
break;
schedule();
} while (1);
finish_wait(&pag->pagb_wait, &wait);
}
while ((pag = xfs_perag_next(mp, pag)))
xfs_extent_busy_wait_group(pag_group(pag));
}
/*
* Callback for list_sort to sort busy extents by the AG they reside in.
* Callback for list_sort to sort busy extents by the group they reside in.
*/
int
xfs_extent_busy_ag_cmp(
@ -659,21 +685,38 @@ xfs_extent_busy_ag_cmp(
container_of(l2, struct xfs_extent_busy, list);
s32 diff;
diff = b1->pag->pag_agno - b2->pag->pag_agno;
diff = b1->group->xg_gno - b2->group->xg_gno;
if (!diff)
diff = b1->bno - b2->bno;
return diff;
}
/* Are there any busy extents in this AG? */
/* Are there any busy extents in this group? */
bool
xfs_extent_busy_list_empty(
struct xfs_perag *pag)
struct xfs_group *xg,
unsigned *busy_gen)
{
struct xfs_extent_busy_tree *eb = xg->xg_busy_extents;
bool res;
spin_lock(&pag->pagb_lock);
res = RB_EMPTY_ROOT(&pag->pagb_tree);
spin_unlock(&pag->pagb_lock);
spin_lock(&eb->eb_lock);
res = RB_EMPTY_ROOT(&eb->eb_tree);
*busy_gen = READ_ONCE(eb->eb_gen);
spin_unlock(&eb->eb_lock);
return res;
}
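The emptiness check now reports the generation as well, which removes the unlocked READ_ONCE() of pagb_gen that callers used to do. The allocbt repair hunk earlier in this diff pairs the two calls like so:

        unsigned int busy_gen;

        if (xfs_extent_busy_list_empty(pag_group(sc->sa.pag), &busy_gen))
                return 0;       /* nothing busy, no flush needed */

        return xfs_extent_busy_flush(sc->tp, pag_group(sc->sa.pag),
                        busy_gen, 0);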
struct xfs_extent_busy_tree *
xfs_extent_busy_alloc(void)
{
struct xfs_extent_busy_tree *eb;
eb = kzalloc(sizeof(*eb), GFP_KERNEL);
if (!eb)
return NULL;
spin_lock_init(&eb->eb_lock);
init_waitqueue_head(&eb->eb_wait);
eb->eb_tree = RB_ROOT;
return eb;
}
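The constructor can fail, so group initialization has to check for NULL. This diff does not show the setup path, so the snippet below is an assumption about how it is wired up, including kfree() as the teardown:

        xg->xg_busy_extents = xfs_extent_busy_alloc();
        if (!xg->xg_busy_extents)
                return -ENOMEM;
        /* ... on teardown, assumed: kfree(xg->xg_busy_extents); */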

View File

@ -8,19 +8,18 @@
#ifndef __XFS_EXTENT_BUSY_H__
#define __XFS_EXTENT_BUSY_H__
struct xfs_group;
struct xfs_mount;
struct xfs_perag;
struct xfs_trans;
struct xfs_alloc_arg;
/*
* Busy block/extent entry. Indexed by a rbtree in perag to mark blocks that
* have been freed but whose transactions aren't committed to disk yet.
* Busy block/extent entry. Indexed by an rbtree in the group to mark blocks
* that have been freed but whose transactions aren't committed to disk yet.
*/
struct xfs_extent_busy {
struct rb_node rb_node; /* ag by-bno indexed search tree */
struct rb_node rb_node; /* group by-bno indexed search tree */
struct list_head list; /* transaction busy extent list */
struct xfs_perag *pag;
struct xfs_group *group;
xfs_agblock_t bno;
xfs_extlen_t length;
unsigned int flags;
@ -44,45 +43,29 @@ struct xfs_busy_extents {
void *owner;
};
void
xfs_extent_busy_insert(struct xfs_trans *tp, struct xfs_perag *pag,
xfs_agblock_t bno, xfs_extlen_t len, unsigned int flags);
void
xfs_extent_busy_insert_discard(struct xfs_perag *pag, xfs_agblock_t bno,
xfs_extlen_t len, struct list_head *busy_list);
void
xfs_extent_busy_clear(struct list_head *list, bool do_discard);
int
xfs_extent_busy_search(struct xfs_perag *pag, xfs_agblock_t bno,
void xfs_extent_busy_insert(struct xfs_trans *tp, struct xfs_group *xg,
xfs_agblock_t bno, xfs_extlen_t len, unsigned int flags);
void xfs_extent_busy_insert_discard(struct xfs_group *xg, xfs_agblock_t bno,
xfs_extlen_t len, struct list_head *busy_list);
void xfs_extent_busy_clear(struct list_head *list, bool do_discard);
int xfs_extent_busy_search(struct xfs_group *xg, xfs_agblock_t bno,
xfs_extlen_t len);
void
xfs_extent_busy_reuse(struct xfs_perag *pag, xfs_agblock_t fbno,
void xfs_extent_busy_reuse(struct xfs_group *xg, xfs_agblock_t fbno,
xfs_extlen_t flen, bool userdata);
bool
xfs_extent_busy_trim(struct xfs_alloc_arg *args, xfs_agblock_t *bno,
xfs_extlen_t *len, unsigned *busy_gen);
int
xfs_extent_busy_flush(struct xfs_trans *tp, struct xfs_perag *pag,
bool xfs_extent_busy_trim(struct xfs_group *xg, xfs_extlen_t minlen,
xfs_extlen_t maxlen, xfs_agblock_t *bno, xfs_extlen_t *len,
unsigned *busy_gen);
int xfs_extent_busy_flush(struct xfs_trans *tp, struct xfs_group *xg,
unsigned busy_gen, uint32_t alloc_flags);
void xfs_extent_busy_wait_all(struct xfs_mount *mp);
bool xfs_extent_busy_list_empty(struct xfs_group *xg, unsigned int *busy_gen);
struct xfs_extent_busy_tree *xfs_extent_busy_alloc(void);
void
xfs_extent_busy_wait_all(struct xfs_mount *mp);
int
xfs_extent_busy_ag_cmp(void *priv, const struct list_head *a,
const struct list_head *b);
int xfs_extent_busy_ag_cmp(void *priv, const struct list_head *a,
const struct list_head *b);
static inline void xfs_extent_busy_sort(struct list_head *list)
{
list_sort(NULL, list, xfs_extent_busy_ag_cmp);
}
bool xfs_extent_busy_list_empty(struct xfs_perag *pag);
#endif /* __XFS_EXTENT_BUSY_H__ */


@ -362,7 +362,7 @@ xfs_extent_free_diff_items(
struct xfs_extent_free_item *ra = xefi_entry(a);
struct xfs_extent_free_item *rb = xefi_entry(b);
return ra->xefi_pag->pag_agno - rb->xefi_pag->pag_agno;
return ra->xefi_group->xg_gno - rb->xefi_group->xg_gno;
}
/* Log a free extent to the intent item. */
@ -447,7 +447,8 @@ xfs_extent_free_defer_add(
trace_xfs_extent_free_defer(mp, xefi);
xefi->xefi_pag = xfs_perag_intent_get(mp, xefi->xefi_startblock);
xefi->xefi_group = xfs_group_intent_get(mp, xefi->xefi_startblock,
XG_TYPE_AG);
if (xefi->xefi_agresv == XFS_AG_RESV_AGFL)
*dfpp = xfs_defer_add(tp, &xefi->xefi_list,
&xfs_agfl_free_defer_type);
@ -463,7 +464,7 @@ xfs_extent_free_cancel_item(
{
struct xfs_extent_free_item *xefi = xefi_entry(item);
xfs_perag_intent_put(xefi->xefi_pag);
xfs_group_intent_put(xefi->xefi_group);
kmem_cache_free(xfs_extfree_item_cache, xefi);
}
@ -499,7 +500,7 @@ xfs_extent_free_finish_item(
* in this EFI to the EFD so this works correctly.
*/
if (!(xefi->xefi_flags & XFS_EFI_CANCELLED))
error = __xfs_free_extent(tp, xefi->xefi_pag, agbno,
error = __xfs_free_extent(tp, to_perag(xefi->xefi_group), agbno,
xefi->xefi_blockcount, &oinfo, xefi->xefi_agresv,
xefi->xefi_flags & XFS_EFI_SKIP_DISCARD);
if (error == -EAGAIN) {
@ -545,7 +546,7 @@ xfs_agfl_free_finish_item(
trace_xfs_agfl_free_deferred(mp, xefi);
error = xfs_alloc_read_agf(xefi->xefi_pag, tp, 0, &agbp);
error = xfs_alloc_read_agf(to_perag(xefi->xefi_group), tp, 0, &agbp);
if (!error)
error = xfs_free_ag_extent(tp, agbp, agbno, 1, &oinfo,
XFS_AG_RESV_AGFL);
@ -578,7 +579,8 @@ xfs_efi_recover_work(
xefi->xefi_blockcount = extp->ext_len;
xefi->xefi_agresv = XFS_AG_RESV_NONE;
xefi->xefi_owner = XFS_RMAP_OWN_UNKNOWN;
xefi->xefi_pag = xfs_perag_intent_get(mp, extp->ext_start);
xefi->xefi_group = xfs_group_intent_get(mp, extp->ext_start,
XG_TYPE_AG);
xfs_defer_add_item(dfp, &xefi->xefi_list);
}


@ -96,7 +96,7 @@ restart:
maxfree = pag->pagf_freeblks;
if (max_pag)
xfs_perag_rele(max_pag);
atomic_inc(&pag->pag_active_ref);
atomic_inc(&pag_group(pag)->xg_active_ref);
max_pag = pag;
}
@ -222,7 +222,7 @@ xfs_filestream_lookup_association(
* down immediately after we mark the lookup as done.
*/
pag = container_of(mru, struct xfs_fstrm_item, mru)->pag;
atomic_inc(&pag->pag_active_ref);
atomic_inc(&pag_group(pag)->xg_active_ref);
xfs_mru_cache_done(mp->m_filestream);
trace_xfs_filestream_lookup(pag, ap->ip->i_ino);
@ -275,7 +275,7 @@ xfs_filestream_create_association(
struct xfs_fstrm_item *item =
container_of(mru, struct xfs_fstrm_item, mru);
agno = (item->pag->pag_agno + 1) % mp->m_sb.sb_agcount;
agno = (pag_agno(item->pag) + 1) % mp->m_sb.sb_agcount;
xfs_fstrm_free_func(mp, mru);
} else if (xfs_is_inode32(mp)) {
xfs_agnumber_t rotorstep = xfs_rotorstep;
@ -314,7 +314,7 @@ xfs_filestream_create_association(
if (!item)
goto out_put_fstrms;
atomic_inc(&args->pag->pag_active_ref);
atomic_inc(&pag_group(args->pag)->xg_active_ref);
item->pag = args->pag;
error = xfs_mru_cache_insert(mp->m_filestream, pino, &item->mru);
if (error)


@ -158,7 +158,7 @@ struct xfs_getfsmap_info {
struct xfs_fsmap_head *head;
struct fsmap *fsmap_recs; /* mapping records */
struct xfs_buf *agf_bp; /* AGF, for refcount queries */
struct xfs_perag *pag; /* AG info, if applicable */
struct xfs_group *group; /* group info, if applicable */
xfs_daddr_t next_daddr; /* next daddr we expect */
/* daddr of low fsmap key when we're using the rtbitmap */
xfs_daddr_t low_daddr;
@ -216,12 +216,13 @@ xfs_getfsmap_is_shared(
if (!xfs_has_reflink(mp))
return 0;
/* rt files will have no perag structure */
if (!info->pag)
if (!info->group)
return 0;
/* Are there any shared blocks here? */
flen = 0;
cur = xfs_refcountbt_init_cursor(mp, tp, info->agf_bp, info->pag);
cur = xfs_refcountbt_init_cursor(mp, tp, info->agf_bp,
to_perag(info->group));
error = xfs_refcount_find_shared(cur, rec->rm_startblock,
rec->rm_blockcount, &fbno, &flen, false);
@ -353,7 +354,8 @@ xfs_getfsmap_helper(
return -ECANCELED;
trace_xfs_fsmap_mapping(mp, info->dev,
info->pag ? info->pag->pag_agno : NULLAGNUMBER, rec);
info->group ? info->group->xg_gno : NULLAGNUMBER,
rec);
fmr.fmr_device = info->dev;
fmr.fmr_physical = rec_daddr;
@ -394,7 +396,8 @@ xfs_getfsmap_datadev_helper(
struct xfs_getfsmap_info *info = priv;
return xfs_getfsmap_helper(cur->bc_tp, info, rec,
xfs_agbno_to_daddr(cur->bc_ag.pag, rec->rm_startblock),
xfs_agbno_to_daddr(to_perag(cur->bc_group),
rec->rm_startblock),
0);
}
@ -415,7 +418,8 @@ xfs_getfsmap_datadev_bnobt_helper(
irec.rm_flags = 0;
return xfs_getfsmap_helper(cur->bc_tp, info, &irec,
xfs_agbno_to_daddr(cur->bc_ag.pag, rec->ar_startblock),
xfs_agbno_to_daddr(to_perag(cur->bc_group),
rec->ar_startblock),
0);
}
@ -460,11 +464,11 @@ __xfs_getfsmap_datadev(
void *priv)
{
struct xfs_mount *mp = tp->t_mountp;
struct xfs_perag *pag;
struct xfs_perag *pag = NULL;
struct xfs_btree_cur *bt_cur = NULL;
xfs_fsblock_t start_fsb;
xfs_fsblock_t end_fsb;
xfs_agnumber_t start_ag, end_ag, ag;
xfs_agnumber_t start_ag, end_ag;
uint64_t eofs;
int error = 0;
@ -512,14 +516,13 @@ __xfs_getfsmap_datadev(
start_ag = XFS_FSB_TO_AGNO(mp, start_fsb);
end_ag = XFS_FSB_TO_AGNO(mp, end_fsb);
ag = start_ag;
for_each_perag_range(mp, ag, end_ag, pag) {
while ((pag = xfs_perag_next_range(mp, pag, start_ag, end_ag))) {
/*
* Set the AG high key from the fsmap high key if this
* is the last AG that we're querying.
*/
info->pag = pag;
if (pag->pag_agno == end_ag) {
info->group = pag_group(pag);
if (pag_agno(pag) == end_ag) {
info->high.rm_startblock = XFS_FSB_TO_AGBNO(mp,
end_fsb);
info->high.rm_offset = XFS_BB_TO_FSBT(mp,
@ -541,9 +544,9 @@ __xfs_getfsmap_datadev(
if (error)
break;
trace_xfs_fsmap_low_key(mp, info->dev, pag->pag_agno,
trace_xfs_fsmap_low_key(mp, info->dev, pag_agno(pag),
&info->low);
trace_xfs_fsmap_high_key(mp, info->dev, pag->pag_agno,
trace_xfs_fsmap_high_key(mp, info->dev, pag_agno(pag),
&info->high);
error = query_fn(tp, info, &bt_cur, priv);
@ -554,7 +557,7 @@ __xfs_getfsmap_datadev(
* Set the AG low key to the start of the AG prior to
* moving on to the next AG.
*/
if (pag->pag_agno == start_ag)
if (pag_agno(pag) == start_ag)
memset(&info->low, 0, sizeof(info->low));
/*
@ -562,13 +565,13 @@ __xfs_getfsmap_datadev(
* before we drop the reference to the perag when the loop
* terminates.
*/
if (pag->pag_agno == end_ag) {
if (pag_agno(pag) == end_ag) {
info->last = true;
error = query_fn(tp, info, &bt_cur, priv);
if (error)
break;
}
info->pag = NULL;
info->group = NULL;
}
if (bt_cur)
@ -578,9 +581,9 @@ __xfs_getfsmap_datadev(
xfs_trans_brelse(tp, info->agf_bp);
info->agf_bp = NULL;
}
if (info->pag) {
xfs_perag_rele(info->pag);
info->pag = NULL;
if (info->group) {
xfs_perag_rele(pag);
info->group = NULL;
} else if (pag) {
/* loop termination case */
xfs_perag_rele(pag);
@ -603,7 +606,7 @@ xfs_getfsmap_datadev_rmapbt_query(
/* Allocate cursor for this AG and query_range it. */
*curpp = xfs_rmapbt_init_cursor(tp->t_mountp, tp, info->agf_bp,
info->pag);
to_perag(info->group));
return xfs_rmap_query_range(*curpp, &info->low, &info->high,
xfs_getfsmap_datadev_helper, info);
}
@ -636,7 +639,7 @@ xfs_getfsmap_datadev_bnobt_query(
/* Allocate cursor for this AG and query_range it. */
*curpp = xfs_bnobt_init_cursor(tp->t_mountp, tp, info->agf_bp,
info->pag);
to_perag(info->group));
key->ar_startblock = info->low.rm_startblock;
key[1].ar_startblock = info->high.rm_startblock;
return xfs_alloc_query_range(*curpp, key, &key[1],
@ -996,7 +999,7 @@ xfs_getfsmap(
info.dev = handlers[i].dev;
info.last = false;
info.pag = NULL;
info.group = NULL;
info.low_daddr = XFS_BUF_DADDR_NULL;
info.low.rm_blockcount = 0;
error = handlers[i].fn(tp, dkeys, &info);


@ -528,13 +528,12 @@ int
xfs_fs_reserve_ag_blocks(
struct xfs_mount *mp)
{
xfs_agnumber_t agno;
struct xfs_perag *pag;
struct xfs_perag *pag = NULL;
int error = 0;
int err2;
mp->m_finobt_nores = false;
for_each_perag(mp, agno, pag) {
while ((pag = xfs_perag_next(mp, pag))) {
err2 = xfs_ag_resv_init(pag, NULL);
if (err2 && !error)
error = err2;
@ -556,9 +555,8 @@ void
xfs_fs_unreserve_ag_blocks(
struct xfs_mount *mp)
{
xfs_agnumber_t agno;
struct xfs_perag *pag;
struct xfs_perag *pag = NULL;
for_each_perag(mp, agno, pag)
while ((pag = xfs_perag_next(mp, pag)))
xfs_ag_resv_free(pag);
}


@ -28,8 +28,7 @@ void
xfs_health_unmount(
struct xfs_mount *mp)
{
struct xfs_perag *pag;
xfs_agnumber_t agno;
struct xfs_perag *pag = NULL;
unsigned int sick = 0;
unsigned int checked = 0;
bool warn = false;
@ -38,10 +37,11 @@ xfs_health_unmount(
return;
/* Measure AG corruption levels. */
for_each_perag(mp, agno, pag) {
xfs_ag_measure_sickness(pag, &sick, &checked);
while ((pag = xfs_perag_next(mp, pag))) {
xfs_group_measure_sickness(pag_group(pag), &sick, &checked);
if (sick) {
trace_xfs_ag_unfixed_corruption(pag, sick);
trace_xfs_group_unfixed_corruption(pag_group(pag),
sick);
warn = true;
}
}
@ -228,61 +228,65 @@ xfs_agno_mark_sick(
/* Mark unhealthy per-ag metadata. */
void
xfs_ag_mark_sick(
struct xfs_perag *pag,
xfs_group_mark_sick(
struct xfs_group *xg,
unsigned int mask)
{
ASSERT(!(mask & ~XFS_SICK_AG_ALL));
trace_xfs_ag_mark_sick(pag, mask);
trace_xfs_group_mark_sick(xg, mask);
spin_lock(&pag->pag_state_lock);
pag->pag_sick |= mask;
spin_unlock(&pag->pag_state_lock);
spin_lock(&xg->xg_state_lock);
xg->xg_sick |= mask;
spin_unlock(&xg->xg_state_lock);
}
/* Mark per-ag metadata as having been checked and found unhealthy by fsck. */
/*
* Mark per-group metadata as having been checked and found unhealthy by fsck.
*/
void
xfs_ag_mark_corrupt(
struct xfs_perag *pag,
xfs_group_mark_corrupt(
struct xfs_group *xg,
unsigned int mask)
{
ASSERT(!(mask & ~XFS_SICK_AG_ALL));
trace_xfs_ag_mark_corrupt(pag, mask);
trace_xfs_group_mark_corrupt(xg, mask);
spin_lock(&pag->pag_state_lock);
pag->pag_sick |= mask;
pag->pag_checked |= mask;
spin_unlock(&pag->pag_state_lock);
spin_lock(&xg->xg_state_lock);
xg->xg_sick |= mask;
xg->xg_checked |= mask;
spin_unlock(&xg->xg_state_lock);
}
/* Mark per-ag metadata ok. */
/*
* Mark per-group metadata ok.
*/
void
xfs_ag_mark_healthy(
struct xfs_perag *pag,
xfs_group_mark_healthy(
struct xfs_group *xg,
unsigned int mask)
{
ASSERT(!(mask & ~XFS_SICK_AG_ALL));
trace_xfs_ag_mark_healthy(pag, mask);
trace_xfs_group_mark_healthy(xg, mask);
spin_lock(&pag->pag_state_lock);
pag->pag_sick &= ~mask;
if (!(pag->pag_sick & XFS_SICK_AG_PRIMARY))
pag->pag_sick &= ~XFS_SICK_AG_SECONDARY;
pag->pag_checked |= mask;
spin_unlock(&pag->pag_state_lock);
spin_lock(&xg->xg_state_lock);
xg->xg_sick &= ~mask;
if (!(xg->xg_sick & XFS_SICK_AG_PRIMARY))
xg->xg_sick &= ~XFS_SICK_AG_SECONDARY;
xg->xg_checked |= mask;
spin_unlock(&xg->xg_state_lock);
}
/* Sample which per-ag metadata are unhealthy. */
void
xfs_ag_measure_sickness(
struct xfs_perag *pag,
xfs_group_measure_sickness(
struct xfs_group *xg,
unsigned int *sick,
unsigned int *checked)
{
spin_lock(&pag->pag_state_lock);
*sick = pag->pag_sick;
*checked = pag->pag_checked;
spin_unlock(&pag->pag_state_lock);
spin_lock(&xg->xg_state_lock);
*sick = xg->xg_sick;
*checked = xg->xg_checked;
spin_unlock(&xg->xg_state_lock);
}
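
Taken together, the helpers above form a small state machine over two bitmasks: xg_sick accumulates observed problems, xg_checked records what has been examined, and clearing the last primary sick bit also wipes the secondary bits. A compact userspace model of just that rule -- the mask values and struct here are hypothetical, not the kernel's XFS_SICK_AG_* layout:

#include <stdio.h>

#define SICK_PRIMARY	0x0fu	/* hypothetical primary-metadata bits */
#define SICK_SECONDARY	0xf0u	/* hypothetical secondary bits */

struct group_health {
	unsigned int	sick;		/* models xg_sick */
	unsigned int	checked;	/* models xg_checked */
};

static void mark_sick(struct group_health *g, unsigned int mask)
{
	g->sick |= mask;
}

static void mark_healthy(struct group_health *g, unsigned int mask)
{
	g->sick &= ~mask;
	/* once no primary metadata is sick, secondary state is meaningless */
	if (!(g->sick & SICK_PRIMARY))
		g->sick &= ~SICK_SECONDARY;
	g->checked |= mask;
}

int main(void)
{
	struct group_health g = { 0, 0 };

	mark_sick(&g, 0x01 | 0x10);	/* one primary, one secondary bit */
	mark_healthy(&g, 0x01);		/* clearing primary wipes secondary */
	printf("sick=%#x checked=%#x\n", g.sick, g.checked);
	return 0;
}
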
/* Mark the unhealthy parts of an inode. */
@ -448,7 +452,7 @@ xfs_ag_geom_health(
ageo->ag_sick = 0;
ageo->ag_checked = 0;
xfs_ag_measure_sickness(pag, &sick, &checked);
xfs_group_measure_sickness(pag_group(pag), &sick, &checked);
for (m = ag_map; m->sick_mask; m++) {
if (checked & m->sick_mask)
ageo->ag_checked |= m->ioctl_mask;
@ -527,24 +531,13 @@ void
xfs_btree_mark_sick(
struct xfs_btree_cur *cur)
{
switch (cur->bc_ops->type) {
case XFS_BTREE_TYPE_MEM:
/* no health state tracking for ephemeral btrees */
return;
case XFS_BTREE_TYPE_AG:
if (xfs_btree_is_bmap(cur->bc_ops)) {
xfs_bmap_mark_sick(cur->bc_ino.ip, cur->bc_ino.whichfork);
/* no health state tracking for ephemeral btrees */
} else if (cur->bc_ops->type != XFS_BTREE_TYPE_MEM) {
ASSERT(cur->bc_group);
ASSERT(cur->bc_ops->sick_mask);
xfs_ag_mark_sick(cur->bc_ag.pag, cur->bc_ops->sick_mask);
return;
case XFS_BTREE_TYPE_INODE:
if (xfs_btree_is_bmap(cur->bc_ops)) {
xfs_bmap_mark_sick(cur->bc_ino.ip,
cur->bc_ino.whichfork);
return;
}
fallthrough;
default:
ASSERT(0);
return;
xfs_group_mark_sick(cur->bc_group, cur->bc_ops->sick_mask);
}
}


@ -204,7 +204,7 @@ xfs_reclaim_work_queue(
{
rcu_read_lock();
if (xa_marked(&mp->m_perags, XFS_PERAG_RECLAIM_MARK)) {
if (xfs_group_marked(mp, XG_TYPE_AG, XFS_PERAG_RECLAIM_MARK)) {
queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
}
@ -219,15 +219,14 @@ static inline void
xfs_blockgc_queue(
struct xfs_perag *pag)
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
if (!xfs_is_blockgc_enabled(mp))
return;
rcu_read_lock();
if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
queue_delayed_work(pag->pag_mount->m_blockgc_wq,
&pag->pag_blockgc_work,
queue_delayed_work(mp->m_blockgc_wq, &pag->pag_blockgc_work,
msecs_to_jiffies(xfs_blockgc_secs * 1000));
rcu_read_unlock();
}
@ -239,7 +238,6 @@ xfs_perag_set_inode_tag(
xfs_agino_t agino,
unsigned int tag)
{
struct xfs_mount *mp = pag->pag_mount;
bool was_tagged;
lockdep_assert_held(&pag->pag_ici_lock);
@ -253,13 +251,13 @@ xfs_perag_set_inode_tag(
if (was_tagged)
return;
/* propagate the tag up into the perag radix tree */
xa_set_mark(&mp->m_perags, pag->pag_agno, ici_tag_to_mark(tag));
/* propagate the tag up into the pag xarray tree */
xfs_group_set_mark(pag_group(pag), ici_tag_to_mark(tag));
/* start background work */
switch (tag) {
case XFS_ICI_RECLAIM_TAG:
xfs_reclaim_work_queue(mp);
xfs_reclaim_work_queue(pag_mount(pag));
break;
case XFS_ICI_BLOCKGC_TAG:
xfs_blockgc_queue(pag);
@ -276,8 +274,6 @@ xfs_perag_clear_inode_tag(
xfs_agino_t agino,
unsigned int tag)
{
struct xfs_mount *mp = pag->pag_mount;
lockdep_assert_held(&pag->pag_ici_lock);
/*
@ -295,9 +291,8 @@ xfs_perag_clear_inode_tag(
if (radix_tree_tagged(&pag->pag_ici_root, tag))
return;
/* clear the tag from the perag radix tree */
xa_clear_mark(&mp->m_perags, pag->pag_agno, ici_tag_to_mark(tag));
/* clear the tag from the pag xarray */
xfs_group_clear_mark(pag_group(pag), ici_tag_to_mark(tag));
trace_xfs_perag_clear_inode_tag(pag, _RET_IP_);
}
@ -310,22 +305,9 @@ xfs_perag_grab_next_tag(
struct xfs_perag *pag,
int tag)
{
unsigned long index = 0;
if (pag) {
index = pag->pag_agno + 1;
xfs_perag_rele(pag);
}
rcu_read_lock();
pag = xa_find(&mp->m_perags, &index, ULONG_MAX, ici_tag_to_mark(tag));
if (pag) {
trace_xfs_perag_grab_next_tag(pag, _RET_IP_);
if (!atomic_inc_not_zero(&pag->pag_active_ref))
pag = NULL;
}
rcu_read_unlock();
return pag;
return to_perag(xfs_group_grab_next_mark(mp,
pag ? pag_group(pag) : NULL,
ici_tag_to_mark(tag), XG_TYPE_AG));
}
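
The wrapper above gives callers a cursor-style loop: pass NULL to start from group zero, and each call releases the active reference on the previous group before grabbing the next marked one. A userspace model of that grab-next/release-previous pattern, with an invented table and tag bit standing in for the xarray and its marks:

#include <stdio.h>

#define NR_ENTRIES	5

struct ent {
	int	refs;		/* models xg_active_ref */
	int	tagged;		/* models the xarray mark */
};

static struct ent table[NR_ENTRIES] = {
	[1] = { .tagged = 1 },
	[3] = { .tagged = 1 },
};

/* Drop the previous entry's reference, then grab the next tagged one. */
static struct ent *grab_next_tagged(struct ent *prev)
{
	int start = 0;

	if (prev) {
		start = (int)(prev - table) + 1;
		prev->refs--;			/* models xfs_perag_rele() */
	}
	for (int i = start; i < NR_ENTRIES; i++) {
		if (table[i].tagged) {
			table[i].refs++;	/* models grabbing the ref */
			return &table[i];
		}
	}
	return NULL;
}

int main(void)
{
	/* a NULL cursor starts at entry 0, just like passing a NULL pag */
	for (struct ent *e = NULL; (e = grab_next_tagged(e)) != NULL; )
		printf("visiting entry %td (refs=%d)\n", e - table, e->refs);
	return 0;
}
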
/*
@ -1014,7 +996,7 @@ xfs_reclaim_inodes(
if (xfs_want_reclaim_sick(mp))
icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
while (xa_marked(&mp->m_perags, XFS_PERAG_RECLAIM_MARK)) {
while (xfs_group_marked(mp, XG_TYPE_AG, XFS_PERAG_RECLAIM_MARK)) {
xfs_ail_push_all_sync(mp->m_ail);
xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
}
@ -1056,7 +1038,7 @@ long
xfs_reclaim_inodes_count(
struct xfs_mount *mp)
{
XA_STATE (xas, &mp->m_perags, 0);
XA_STATE (xas, &mp->m_groups[XG_TYPE_AG].xa, 0);
long reclaimable = 0;
struct xfs_perag *pag;
@ -1401,13 +1383,12 @@ void
xfs_blockgc_stop(
struct xfs_mount *mp)
{
struct xfs_perag *pag;
xfs_agnumber_t agno;
struct xfs_perag *pag = NULL;
if (!xfs_clear_blockgc_enabled(mp))
return;
for_each_perag(mp, agno, pag)
while ((pag = xfs_perag_next(mp, pag)))
cancel_delayed_work_sync(&pag->pag_blockgc_work);
trace_xfs_blockgc_stop(mp, __return_address);
}
@ -1499,7 +1480,7 @@ xfs_blockgc_worker(
{
struct xfs_perag *pag = container_of(to_delayed_work(work),
struct xfs_perag, pag_blockgc_work);
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
int error;
trace_xfs_blockgc_worker(mp, __return_address);
@ -1507,7 +1488,7 @@ xfs_blockgc_worker(
error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
if (error)
xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
pag->pag_agno, error);
pag_agno(pag), error);
xfs_blockgc_queue(pag);
}
@ -1548,8 +1529,7 @@ xfs_blockgc_flush_all(
* queued, it will not be requeued. Then flush whatever is left.
*/
while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG)))
mod_delayed_work(pag->pag_mount->m_blockgc_wq,
&pag->pag_blockgc_work, 0);
mod_delayed_work(mp->m_blockgc_wq, &pag->pag_blockgc_work, 0);
while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG)))
flush_delayed_work(&pag->pag_blockgc_work);
@ -1688,7 +1668,7 @@ xfs_icwalk_ag(
enum xfs_icwalk_goal goal,
struct xfs_icwalk *icw)
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
uint32_t first_index;
int last_error = 0;
int skipped;
@ -1741,7 +1721,7 @@ restart:
* us to see this inode, so another lookup from the
* same index will not find it again.
*/
if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag_agno(pag))
continue;
first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))


@ -1514,7 +1514,7 @@ xfs_iunlink_reload_next(
xfs_agino_t next_agino)
{
struct xfs_perag *pag = agibp->b_pag;
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
struct xfs_inode *next_ip = NULL;
int error;
@ -1529,7 +1529,7 @@ xfs_iunlink_reload_next(
xfs_info_ratelimited(mp,
"Found unrecovered unlinked inode 0x%x in AG 0x%x. Initiating recovery.",
next_agino, pag->pag_agno);
next_agino, pag_agno(pag));
/*
* Use an untrusted lookup just to be cautious in case the AGI has been
@ -1572,7 +1572,7 @@ xfs_ifree_mark_inode_stale(
struct xfs_inode *free_ip,
xfs_ino_t inum)
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
struct xfs_inode_log_item *iip;
struct xfs_inode *ip;


@ -188,7 +188,7 @@ xfs_iwalk_ag_recs(
return 0;
if (iwag->inobt_walk_fn) {
error = iwag->inobt_walk_fn(mp, tp, pag->pag_agno, irec,
error = iwag->inobt_walk_fn(mp, tp, pag_agno(pag), irec,
iwag->data);
if (error)
return error;
@ -405,7 +405,7 @@ xfs_iwalk_ag(
int error = 0;
/* Set up our cursor at the right place in the inode btree. */
ASSERT(pag->pag_agno == XFS_INO_TO_AGNO(mp, iwag->startino));
ASSERT(pag_agno(pag) == XFS_INO_TO_AGNO(mp, iwag->startino));
agino = XFS_INO_TO_AGINO(mp, iwag->startino);
error = xfs_iwalk_ag_start(iwag, agino, &cur, &agi_bp, &has_more);
@ -534,6 +534,37 @@ xfs_iwalk_prefetch(
return max(inobt_records, 2U);
}
static int
xfs_iwalk_args(
struct xfs_iwalk_ag *iwag,
unsigned int flags)
{
struct xfs_mount *mp = iwag->mp;
xfs_agnumber_t start_agno;
int error;
start_agno = XFS_INO_TO_AGNO(iwag->mp, iwag->startino);
ASSERT(start_agno < iwag->mp->m_sb.sb_agcount);
ASSERT(!(flags & ~XFS_IWALK_FLAGS_ALL));
error = xfs_iwalk_alloc(iwag);
if (error)
return error;
while ((iwag->pag = xfs_perag_next_from(mp, iwag->pag, start_agno))) {
error = xfs_iwalk_ag(iwag);
if (error || (flags & XFS_IWALK_SAME_AG)) {
xfs_perag_rele(iwag->pag);
break;
}
iwag->startino =
XFS_AGINO_TO_INO(mp, pag_agno(iwag->pag) + 1, 0);
}
xfs_iwalk_free(iwag);
return error;
}
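
This helper centralizes the per-AG loop that xfs_iwalk() and xfs_inobt_walk() previously duplicated: walk from the starting AG, stop on error or when the SAME_AG flag restricts the walk to one group, otherwise advance startino to the first inode of the next AG. A userspace sketch of just that control flow, with invented stand-ins (AGCOUNT, walk_one_ag, WALK_SAME_AG):

#include <stdio.h>

#define AGCOUNT		4
#define WALK_SAME_AG	(1u << 0)	/* models XFS_IWALK_SAME_AG */

static int walk_one_ag(unsigned int agno)
{
	printf("walking AG %u\n", agno);
	return 0;			/* 0 == success, kernel-style */
}

static int walk_args(unsigned int start_agno, unsigned int flags)
{
	int error = 0;

	for (unsigned int agno = start_agno; agno < AGCOUNT; agno++) {
		error = walk_one_ag(agno);
		/* stop on error, or after one AG if restricted */
		if (error || (flags & WALK_SAME_AG))
			break;
	}
	return error;
}

int main(void)
{
	walk_args(1, 0);		/* walks AGs 1..3 */
	walk_args(2, WALK_SAME_AG);	/* walks AG 2 only */
	return 0;
}
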
/*
* Walk all inodes in the filesystem starting from @startino. The @iwalk_fn
* will be called for each allocated inode, being passed the inode's number and
@ -562,32 +593,8 @@ xfs_iwalk(
.pwork = XFS_PWORK_SINGLE_THREADED,
.lastino = NULLFSINO,
};
struct xfs_perag *pag;
xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, startino);
int error;
ASSERT(agno < mp->m_sb.sb_agcount);
ASSERT(!(flags & ~XFS_IWALK_FLAGS_ALL));
error = xfs_iwalk_alloc(&iwag);
if (error)
return error;
for_each_perag_from(mp, agno, pag) {
iwag.pag = pag;
error = xfs_iwalk_ag(&iwag);
if (error)
break;
iwag.startino = XFS_AGINO_TO_INO(mp, agno + 1, 0);
if (flags & XFS_INOBT_WALK_SAME_AG)
break;
iwag.pag = NULL;
}
if (iwag.pag)
xfs_perag_rele(pag);
xfs_iwalk_free(&iwag);
return error;
return xfs_iwalk_args(&iwag, flags);
}
/* Run per-thread iwalk work. */
@ -639,19 +646,19 @@ xfs_iwalk_threaded(
bool polled,
void *data)
{
xfs_agnumber_t start_agno = XFS_INO_TO_AGNO(mp, startino);
struct xfs_pwork_ctl pctl;
struct xfs_perag *pag;
xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, startino);
struct xfs_perag *pag = NULL;
int error;
ASSERT(agno < mp->m_sb.sb_agcount);
ASSERT(start_agno < mp->m_sb.sb_agcount);
ASSERT(!(flags & ~XFS_IWALK_FLAGS_ALL));
error = xfs_pwork_init(mp, &pctl, xfs_iwalk_ag_work, "xfs_iwalk");
if (error)
return error;
for_each_perag_from(mp, agno, pag) {
while ((pag = xfs_perag_next_from(mp, pag, start_agno))) {
struct xfs_iwalk_ag *iwag;
if (xfs_pwork_ctl_want_abort(&pctl))
@ -672,8 +679,8 @@ xfs_iwalk_threaded(
iwag->sz_recs = xfs_iwalk_prefetch(inode_records);
iwag->lastino = NULLFSINO;
xfs_pwork_queue(&pctl, &iwag->pwork);
startino = XFS_AGINO_TO_INO(mp, pag->pag_agno + 1, 0);
if (flags & XFS_INOBT_WALK_SAME_AG)
startino = XFS_AGINO_TO_INO(mp, pag_agno(pag) + 1, 0);
if (flags & XFS_IWALK_SAME_AG)
break;
}
if (pag)
@ -747,30 +754,6 @@ xfs_inobt_walk(
.pwork = XFS_PWORK_SINGLE_THREADED,
.lastino = NULLFSINO,
};
struct xfs_perag *pag;
xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, startino);
int error;
ASSERT(agno < mp->m_sb.sb_agcount);
ASSERT(!(flags & ~XFS_INOBT_WALK_FLAGS_ALL));
error = xfs_iwalk_alloc(&iwag);
if (error)
return error;
for_each_perag_from(mp, agno, pag) {
iwag.pag = pag;
error = xfs_iwalk_ag(&iwag);
if (error)
break;
iwag.startino = XFS_AGINO_TO_INO(mp, pag->pag_agno + 1, 0);
if (flags & XFS_INOBT_WALK_SAME_AG)
break;
iwag.pag = NULL;
}
if (iwag.pag)
xfs_perag_rele(pag);
xfs_iwalk_free(&iwag);
return error;
return xfs_iwalk_args(&iwag, flags);
}


@ -25,7 +25,7 @@ int xfs_iwalk_threaded(struct xfs_mount *mp, xfs_ino_t startino,
unsigned int flags, xfs_iwalk_fn iwalk_fn,
unsigned int inode_records, bool poll, void *data);
/* Only iterate inodes within the same AG as @startino. */
/* Only iterate within the same AG as @startino. */
#define XFS_IWALK_SAME_AG (1U << 0)
#define XFS_IWALK_FLAGS_ALL (XFS_IWALK_SAME_AG)
@ -41,9 +41,4 @@ int xfs_inobt_walk(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_inobt_walk_fn inobt_walk_fn, unsigned int inobt_records,
void *data);
/* Only iterate inobt records within the same AG as @startino. */
#define XFS_INOBT_WALK_SAME_AG (XFS_IWALK_SAME_AG)
#define XFS_INOBT_WALK_FLAGS_ALL (XFS_INOBT_WALK_SAME_AG)
#endif /* __XFS_IWALK_H__ */


@ -2677,7 +2677,7 @@ xlog_recover_clear_agi_bucket(
struct xfs_perag *pag,
int bucket)
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
struct xfs_trans *tp;
struct xfs_agi *agi;
struct xfs_buf *agibp;
@ -2708,7 +2708,7 @@ out_abort:
xfs_trans_cancel(tp);
out_error:
xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__,
pag->pag_agno);
pag_agno(pag));
return;
}
@ -2718,7 +2718,7 @@ xlog_recover_iunlink_bucket(
struct xfs_agi *agi,
int bucket)
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_mount *mp = pag_mount(pag);
struct xfs_inode *prev_ip = NULL;
struct xfs_inode *ip;
xfs_agino_t prev_agino, agino;
@ -2845,10 +2845,9 @@ static void
xlog_recover_process_iunlinks(
struct xlog *log)
{
struct xfs_perag *pag;
xfs_agnumber_t agno;
struct xfs_perag *pag = NULL;
for_each_perag(log->l_mp, agno, pag)
while ((pag = xfs_perag_next(log->l_mp, pag)))
xlog_recover_iunlink_ag(pag);
}


@ -71,6 +71,40 @@ struct xfs_inodegc {
unsigned int cpu;
};
/*
* Container for each type of group, used to look up individual groups and
* to describe their geometry.
*/
struct xfs_groups {
struct xarray xa;
/*
* Maximum capacity of the group in FSBs.
*
* Each group is laid out densely in the daddr space.  For the
* degenerate case of a pre-rtgroups filesystem, the incore code
* pretends that there is a single zero-block, zero-blklog rtgroup.
*/
uint32_t blocks;
/*
* Log(2) of the logical size of each group.
*
* Compared to the blocks field above, this is rounded up to the next
* power of two, and thus lays out the xfs_fsblock_t/xfs_rtblock_t
* space sparsely, with a hole from blocks to (1 << blklog) at the end
* of each group.
*/
uint8_t blklog;
/*
* Mask to extract the group-relative block number from an FSB.
* For a pre-rtgroups filesystem we pretend to have one very large
* rtgroup, so this mask must be 64-bit.
*/
uint64_t blkmask;
};
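
Given this geometry, a linear block number splits into a group index and a group-relative offset with one shift and one mask, and offsets from blocks up to (1 << blklog) land in the per-group hole. A small userspace demonstration under assumed numbers (100 usable blocks rounded up to 128; struct groups_geo and all names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct groups_geo {
	uint32_t	blocks;		/* usable blocks per group */
	uint8_t		blklog;		/* log2 of the rounded-up group size */
	uint64_t	blkmask;	/* (1ULL << blklog) - 1 */
};

int main(void)
{
	/* assumed geometry: 100 usable blocks, rounded up to 128 */
	struct groups_geo g = {
		.blocks = 100,
		.blklog = 7,
		.blkmask = (1ULL << 7) - 1,
	};
	uint64_t fsbno = 300;			/* linear block number */
	uint64_t gno = fsbno >> g.blklog;	/* group index */
	uint64_t gbno = fsbno & g.blkmask;	/* group-relative block */

	/* offsets blocks..(1 << blklog)-1 fall in the per-group hole */
	printf("fsbno %llu -> group %llu, offset %llu%s\n",
	       (unsigned long long)fsbno,
	       (unsigned long long)gno,
	       (unsigned long long)gbno,
	       gbno >= g.blocks ? " (in the hole)" : "");
	return 0;
}
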
/*
* The struct xfsmount layout is optimised to separate read-mostly variables
* from variables that are frequently modified. We put the read-mostly variables
@ -208,7 +242,7 @@ typedef struct xfs_mount {
*/
atomic64_t m_allocbt_blks;
struct xarray m_perags; /* per-ag accounting info */
struct xfs_groups m_groups[XG_TYPE_MAX];
uint64_t m_resblks; /* total reserved blocks */
uint64_t m_resblks_avail;/* available reserved blocks */
uint64_t m_resblks_save; /* reserved blks @ remount,ro */


@ -244,7 +244,7 @@ xfs_refcount_update_diff_items(
struct xfs_refcount_intent *ra = ci_entry(a);
struct xfs_refcount_intent *rb = ci_entry(b);
return ra->ri_pag->pag_agno - rb->ri_pag->pag_agno;
return ra->ri_group->xg_gno - rb->ri_group->xg_gno;
}
/* Log refcount updates in the intent item. */
@ -330,7 +330,7 @@ xfs_refcount_defer_add(
trace_xfs_refcount_defer(mp, ri);
ri->ri_pag = xfs_perag_intent_get(mp, ri->ri_startblock);
ri->ri_group = xfs_group_intent_get(mp, ri->ri_startblock, XG_TYPE_AG);
xfs_defer_add(tp, &ri->ri_list, &xfs_refcount_update_defer_type);
}
@ -341,7 +341,7 @@ xfs_refcount_update_cancel_item(
{
struct xfs_refcount_intent *ri = ci_entry(item);
xfs_perag_intent_put(ri->ri_pag);
xfs_group_intent_put(ri->ri_group);
kmem_cache_free(xfs_refcount_intent_cache, ri);
}
@ -431,7 +431,8 @@ xfs_cui_recover_work(
ri->ri_type = pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
ri->ri_startblock = pmap->pe_startblock;
ri->ri_blockcount = pmap->pe_len;
ri->ri_pag = xfs_perag_intent_get(mp, pmap->pe_startblock);
ri->ri_group = xfs_group_intent_get(mp, pmap->pe_startblock,
XG_TYPE_AG);
xfs_defer_add_item(dfp, &ri->ri_list);
}


@ -144,7 +144,7 @@ xfs_reflink_find_shared(
if (error)
return error;
cur = xfs_refcountbt_init_cursor(pag->pag_mount, tp, agbp, pag);
cur = xfs_refcountbt_init_cursor(pag_mount(pag), tp, agbp, pag);
error = xfs_refcount_find_shared(cur, agbno, aglen, fbno, flen,
find_end_of_shared);
@ -894,14 +894,13 @@ int
xfs_reflink_recover_cow(
struct xfs_mount *mp)
{
struct xfs_perag *pag;
xfs_agnumber_t agno;
struct xfs_perag *pag = NULL;
int error = 0;
if (!xfs_has_reflink(mp))
return 0;
for_each_perag(mp, agno, pag) {
while ((pag = xfs_perag_next(mp, pag))) {
error = xfs_refcount_recover_cow_leftovers(mp, pag);
if (error) {
xfs_perag_rele(pag);


@ -243,7 +243,7 @@ xfs_rmap_update_diff_items(
struct xfs_rmap_intent *ra = ri_entry(a);
struct xfs_rmap_intent *rb = ri_entry(b);
return ra->ri_pag->pag_agno - rb->ri_pag->pag_agno;
return ra->ri_group->xg_gno - rb->ri_group->xg_gno;
}
/* Log rmap updates in the intent item. */
@ -353,7 +353,8 @@ xfs_rmap_defer_add(
trace_xfs_rmap_defer(mp, ri);
ri->ri_pag = xfs_perag_intent_get(mp, ri->ri_bmap.br_startblock);
ri->ri_group = xfs_group_intent_get(mp, ri->ri_bmap.br_startblock,
XG_TYPE_AG);
xfs_defer_add(tp, &ri->ri_list, &xfs_rmap_update_defer_type);
}
@ -364,7 +365,7 @@ xfs_rmap_update_cancel_item(
{
struct xfs_rmap_intent *ri = ri_entry(item);
xfs_perag_intent_put(ri->ri_pag);
xfs_group_intent_put(ri->ri_group);
kmem_cache_free(xfs_rmap_intent_cache, ri);
}
@ -494,7 +495,7 @@ xfs_rui_recover_work(
ri->ri_bmap.br_blockcount = map->me_len;
ri->ri_bmap.br_state = (map->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
ri->ri_pag = xfs_perag_intent_get(mp, map->me_startblock);
ri->ri_group = xfs_group_intent_get(mp, map->me_startblock, XG_TYPE_AG);
xfs_defer_add_item(dfp, &ri->ri_list);
}


@ -238,7 +238,7 @@ xfs_set_inode_alloc_perag(
xfs_ino_t ino,
xfs_agnumber_t max_metadata)
{
if (!xfs_is_inode32(pag->pag_mount)) {
if (!xfs_is_inode32(pag_mount(pag))) {
set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
return false;
@ -251,7 +251,7 @@ xfs_set_inode_alloc_perag(
}
set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
if (pag->pag_agno < max_metadata)
if (pag_agno(pag) < max_metadata)
set_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
else
clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
@ -2011,17 +2011,20 @@ static const struct fs_context_operations xfs_context_ops = {
* mount option parsing having already been performed as this can be called from
* fsopen() before any parameters have been set.
*/
static int xfs_init_fs_context(
static int
xfs_init_fs_context(
struct fs_context *fc)
{
struct xfs_mount *mp;
int i;
mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL | __GFP_NOFAIL);
if (!mp)
return -ENOMEM;
spin_lock_init(&mp->m_sb_lock);
xa_init(&mp->m_perags);
for (i = 0; i < XG_TYPE_MAX; i++)
xa_init(&mp->m_groups[i].xa);
mutex_init(&mp->m_growlock);
INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);


@ -11,6 +11,7 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_group.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"


@ -72,6 +72,7 @@ struct xfs_btree_cur;
struct xfs_defer_op_type;
struct xfs_refcount_irec;
struct xfs_fsmap;
struct xfs_group;
struct xfs_rmap_irec;
struct xfs_icreate_log;
struct xfs_iunlink_item;
@ -192,10 +193,11 @@ DECLARE_EVENT_CLASS(xfs_perag_class,
__field(unsigned long, caller_ip)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->refcount = atomic_read(&pag->pag_ref);
__entry->active_refcount = atomic_read(&pag->pag_active_ref);
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->refcount = atomic_read(&pag->pag_group.xg_ref);
__entry->active_refcount =
atomic_read(&pag->pag_group.xg_active_ref);
__entry->caller_ip = caller_ip;
),
TP_printk("dev %d:%d agno 0x%x passive refs %d active refs %d caller %pS",
@ -210,16 +212,51 @@ DECLARE_EVENT_CLASS(xfs_perag_class,
DEFINE_EVENT(xfs_perag_class, name, \
TP_PROTO(const struct xfs_perag *pag, unsigned long caller_ip), \
TP_ARGS(pag, caller_ip))
DEFINE_PERAG_REF_EVENT(xfs_perag_get);
DEFINE_PERAG_REF_EVENT(xfs_perag_hold);
DEFINE_PERAG_REF_EVENT(xfs_perag_put);
DEFINE_PERAG_REF_EVENT(xfs_perag_grab);
DEFINE_PERAG_REF_EVENT(xfs_perag_grab_next_tag);
DEFINE_PERAG_REF_EVENT(xfs_perag_rele);
DEFINE_PERAG_REF_EVENT(xfs_perag_set_inode_tag);
DEFINE_PERAG_REF_EVENT(xfs_perag_clear_inode_tag);
DEFINE_PERAG_REF_EVENT(xfs_reclaim_inodes_count);
TRACE_DEFINE_ENUM(XG_TYPE_AG);
DECLARE_EVENT_CLASS(xfs_group_class,
TP_PROTO(struct xfs_group *xg, unsigned long caller_ip),
TP_ARGS(xg, caller_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(enum xfs_group_type, type)
__field(xfs_agnumber_t, agno)
__field(int, refcount)
__field(int, active_refcount)
__field(unsigned long, caller_ip)
),
TP_fast_assign(
__entry->dev = xg->xg_mount->m_super->s_dev;
__entry->type = xg->xg_type;
__entry->agno = xg->xg_gno;
__entry->refcount = atomic_read(&xg->xg_ref);
__entry->active_refcount = atomic_read(&xg->xg_active_ref);
__entry->caller_ip = caller_ip;
),
TP_printk("dev %d:%d %sno 0x%x passive refs %d active refs %d caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agno,
__entry->refcount,
__entry->active_refcount,
(char *)__entry->caller_ip)
);
#define DEFINE_GROUP_REF_EVENT(name) \
DEFINE_EVENT(xfs_group_class, name, \
TP_PROTO(struct xfs_group *xg, unsigned long caller_ip), \
TP_ARGS(xg, caller_ip))
DEFINE_GROUP_REF_EVENT(xfs_group_get);
DEFINE_GROUP_REF_EVENT(xfs_group_hold);
DEFINE_GROUP_REF_EVENT(xfs_group_put);
DEFINE_GROUP_REF_EVENT(xfs_group_grab);
DEFINE_GROUP_REF_EVENT(xfs_group_grab_next_tag);
DEFINE_GROUP_REF_EVENT(xfs_group_rele);
TRACE_EVENT(xfs_inodegc_worker,
TP_PROTO(struct xfs_mount *mp, unsigned int shrinker_hits),
TP_ARGS(mp, shrinker_hits),
@ -307,8 +344,8 @@ DECLARE_EVENT_CLASS(xfs_ag_class,
__field(xfs_agnumber_t, agno)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
),
TP_printk("dev %d:%d agno 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
@ -672,9 +709,9 @@ DECLARE_EVENT_CLASS(xfs_filestream_class,
__field(int, streams)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->ino = ino;
__entry->agno = pag->pag_agno;
__entry->agno = pag_agno(pag);
__entry->streams = atomic_read(&pag->pagf_fstrms);
),
TP_printk("dev %d:%d ino 0x%llx agno 0x%x streams %d",
@ -702,9 +739,9 @@ TRACE_EVENT(xfs_filestream_pick,
__field(xfs_extlen_t, free)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->ino = ino;
__entry->agno = pag->pag_agno;
__entry->agno = pag_agno(pag);
__entry->streams = atomic_read(&pag->pagf_fstrms);
__entry->free = pag->pagf_freeblks;
),
@ -912,8 +949,8 @@ TRACE_EVENT(xfs_irec_merge_pre,
__field(uint16_t, nholemask)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->agino = rec->ir_startino;
__entry->holemask = rec->ir_holemask;
__entry->nagino = nrec->ir_startino;
@ -939,8 +976,8 @@ TRACE_EVENT(xfs_irec_merge_post,
__field(uint16_t, holemask)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->agino = nrec->ir_startino;
__entry->holemask = nrec->ir_holemask;
),
@ -1640,43 +1677,48 @@ TRACE_EVENT(xfs_bunmap,
);
DECLARE_EVENT_CLASS(xfs_extent_busy_class,
TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno,
TP_PROTO(const struct xfs_group *xg, xfs_agblock_t agbno,
xfs_extlen_t len),
TP_ARGS(pag, agbno, len),
TP_ARGS(xg, agbno, len),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(enum xfs_group_type, type)
__field(xfs_agnumber_t, agno)
__field(xfs_agblock_t, agbno)
__field(xfs_extlen_t, len)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = xg->xg_mount->m_super->s_dev;
__entry->type = xg->xg_type;
__entry->agno = xg->xg_gno;
__entry->agbno = agbno;
__entry->len = len;
),
TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x",
TP_printk("dev %d:%d %sno 0x%x %sbno 0x%x fsbcount 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agno,
__print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agbno,
__entry->len)
);
#define DEFINE_BUSY_EVENT(name) \
DEFINE_EVENT(xfs_extent_busy_class, name, \
TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno, \
xfs_extlen_t len), \
TP_ARGS(pag, agbno, len))
TP_PROTO(const struct xfs_group *xg, xfs_agblock_t agbno, \
xfs_extlen_t len), \
TP_ARGS(xg, agbno, len))
DEFINE_BUSY_EVENT(xfs_extent_busy);
DEFINE_BUSY_EVENT(xfs_extent_busy_force);
DEFINE_BUSY_EVENT(xfs_extent_busy_reuse);
DEFINE_BUSY_EVENT(xfs_extent_busy_clear);
TRACE_EVENT(xfs_extent_busy_trim,
TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno,
TP_PROTO(const struct xfs_group *xg, xfs_agblock_t agbno,
xfs_extlen_t len, xfs_agblock_t tbno, xfs_extlen_t tlen),
TP_ARGS(pag, agbno, len, tbno, tlen),
TP_ARGS(xg, agbno, len, tbno, tlen),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(enum xfs_group_type, type)
__field(xfs_agnumber_t, agno)
__field(xfs_agblock_t, agbno)
__field(xfs_extlen_t, len)
@ -1684,16 +1726,19 @@ TRACE_EVENT(xfs_extent_busy_trim,
__field(xfs_extlen_t, tlen)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = xg->xg_mount->m_super->s_dev;
__entry->type = xg->xg_type;
__entry->agno = xg->xg_gno;
__entry->agbno = agbno;
__entry->len = len;
__entry->tbno = tbno;
__entry->tlen = tlen;
),
TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x found_agbno 0x%x found_fsbcount 0x%x",
TP_printk("dev %d:%d %sno 0x%x %sbno 0x%x fsbcount 0x%x found_agbno 0x%x found_fsbcount 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agno,
__print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->agbno,
__entry->len,
__entry->tbno,
@ -1777,8 +1822,8 @@ TRACE_EVENT(xfs_free_extent,
__field(int, haveright)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->agbno = agbno;
__entry->len = len;
__entry->resv = resv;
@ -2441,8 +2486,8 @@ DECLARE_EVENT_CLASS(xfs_discard_class,
__field(xfs_extlen_t, len)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->agbno = agbno;
__entry->len = len;
),
@ -2547,7 +2592,7 @@ TRACE_EVENT(xfs_btree_alloc_block,
__entry->ino = cur->bc_ino.ip->i_ino;
break;
case XFS_BTREE_TYPE_AG:
__entry->agno = cur->bc_ag.pag->pag_agno;
__entry->agno = cur->bc_group->xg_gno;
__entry->ino = 0;
break;
case XFS_BTREE_TYPE_MEM:
@ -2803,7 +2848,7 @@ DECLARE_EVENT_CLASS(xfs_rmap_class,
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
__entry->agno = cur->bc_ag.pag->pag_agno;
__entry->agno = cur->bc_group->xg_gno;
__entry->agbno = agbno;
__entry->len = len;
__entry->owner = oinfo->oi_owner;
@ -2848,7 +2893,7 @@ DECLARE_EVENT_CLASS(xfs_btree_error_class,
__entry->ino = cur->bc_ino.ip->i_ino;
break;
case XFS_BTREE_TYPE_AG:
__entry->agno = cur->bc_ag.pag->pag_agno;
__entry->agno = cur->bc_group->xg_gno;
__entry->ino = 0;
break;
case XFS_BTREE_TYPE_MEM:
@ -2902,7 +2947,7 @@ TRACE_EVENT(xfs_rmap_convert_state,
__entry->ino = cur->bc_ino.ip->i_ino;
break;
case XFS_BTREE_TYPE_AG:
__entry->agno = cur->bc_ag.pag->pag_agno;
__entry->agno = cur->bc_group->xg_gno;
__entry->ino = 0;
break;
case XFS_BTREE_TYPE_MEM:
@ -2937,7 +2982,7 @@ DECLARE_EVENT_CLASS(xfs_rmapbt_class,
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
__entry->agno = cur->bc_ag.pag->pag_agno;
__entry->agno = cur->bc_group->xg_gno;
__entry->agbno = agbno;
__entry->len = len;
__entry->owner = owner;
@ -3110,8 +3155,8 @@ DECLARE_EVENT_CLASS(xfs_ag_resv_class,
TP_fast_assign(
struct xfs_ag_resv *r = xfs_perag_resv(pag, resv);
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->resv = resv;
__entry->freeblks = pag->pagf_freeblks;
__entry->flcount = pag->pagf_flcount;
@ -3155,8 +3200,8 @@ TRACE_EVENT(xfs_ag_resv_init_error,
__field(unsigned long, caller_ip)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->error = error;
__entry->caller_ip = caller_ip;
),
@ -3181,7 +3226,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_class,
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
__entry->agno = cur->bc_ag.pag->pag_agno;
__entry->agno = cur->bc_group->xg_gno;
__entry->agbno = agbno;
__entry->len = len;
),
@ -3212,7 +3257,7 @@ TRACE_EVENT(xfs_refcount_lookup,
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
__entry->agno = cur->bc_ag.pag->pag_agno;
__entry->agno = cur->bc_group->xg_gno;
__entry->agbno = agbno;
__entry->dir = dir;
),
@ -3238,7 +3283,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_extent_class,
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
__entry->agno = cur->bc_ag.pag->pag_agno;
__entry->agno = cur->bc_group->xg_gno;
__entry->domain = irec->rc_domain;
__entry->startblock = irec->rc_startblock;
__entry->blockcount = irec->rc_blockcount;
@ -3274,7 +3319,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_extent_at_class,
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
__entry->agno = cur->bc_ag.pag->pag_agno;
__entry->agno = cur->bc_group->xg_gno;
__entry->domain = irec->rc_domain;
__entry->startblock = irec->rc_startblock;
__entry->blockcount = irec->rc_blockcount;
@ -3316,7 +3361,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_class,
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
__entry->agno = cur->bc_ag.pag->pag_agno;
__entry->agno = cur->bc_group->xg_gno;
__entry->i1_domain = i1->rc_domain;
__entry->i1_startblock = i1->rc_startblock;
__entry->i1_blockcount = i1->rc_blockcount;
@ -3366,7 +3411,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_at_class,
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
__entry->agno = cur->bc_ag.pag->pag_agno;
__entry->agno = cur->bc_group->xg_gno;
__entry->i1_domain = i1->rc_domain;
__entry->i1_startblock = i1->rc_startblock;
__entry->i1_blockcount = i1->rc_blockcount;
@ -3421,7 +3466,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_triple_extent_class,
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
__entry->agno = cur->bc_ag.pag->pag_agno;
__entry->agno = cur->bc_group->xg_gno;
__entry->i1_domain = i1->rc_domain;
__entry->i1_startblock = i1->rc_startblock;
__entry->i1_blockcount = i1->rc_blockcount;
@ -4044,8 +4089,8 @@ TRACE_EVENT(xfs_iunlink_update_bucket,
__field(xfs_agino_t, new_ptr)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->bucket = bucket;
__entry->old_ptr = old_ptr;
__entry->new_ptr = new_ptr;
@ -4069,8 +4114,8 @@ TRACE_EVENT(xfs_iunlink_update_dinode,
__field(xfs_agino_t, new_ptr)
),
TP_fast_assign(
__entry->dev = iup->pag->pag_mount->m_super->s_dev;
__entry->agno = iup->pag->pag_agno;
__entry->dev = pag_mount(iup->pag)->m_super->s_dev;
__entry->agno = pag_agno(iup->pag);
__entry->agino =
XFS_INO_TO_AGINO(iup->ip->i_mount, iup->ip->i_ino);
__entry->old_ptr = old_ptr;
@ -4182,31 +4227,34 @@ DEFINE_FS_CORRUPT_EVENT(xfs_rt_mark_corrupt);
DEFINE_FS_CORRUPT_EVENT(xfs_rt_mark_healthy);
DEFINE_FS_CORRUPT_EVENT(xfs_rt_unfixed_corruption);
DECLARE_EVENT_CLASS(xfs_ag_corrupt_class,
TP_PROTO(const struct xfs_perag *pag, unsigned int flags),
TP_ARGS(pag, flags),
DECLARE_EVENT_CLASS(xfs_group_corrupt_class,
TP_PROTO(const struct xfs_group *xg, unsigned int flags),
TP_ARGS(xg, flags),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(enum xfs_group_type, type)
__field(uint32_t, index)
__field(unsigned int, flags)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = xg->xg_mount->m_super->s_dev;
__entry->type = xg->xg_type;
__entry->index = xg->xg_gno;
__entry->flags = flags;
),
TP_printk("dev %d:%d agno 0x%x flags 0x%x",
TP_printk("dev %d:%d %sno 0x%x flags 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno, __entry->flags)
__print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->index, __entry->flags)
);
#define DEFINE_AG_CORRUPT_EVENT(name) \
DEFINE_EVENT(xfs_ag_corrupt_class, name, \
TP_PROTO(const struct xfs_perag *pag, unsigned int flags), \
TP_ARGS(pag, flags))
DEFINE_AG_CORRUPT_EVENT(xfs_ag_mark_sick);
DEFINE_AG_CORRUPT_EVENT(xfs_ag_mark_corrupt);
DEFINE_AG_CORRUPT_EVENT(xfs_ag_mark_healthy);
DEFINE_AG_CORRUPT_EVENT(xfs_ag_unfixed_corruption);
#define DEFINE_GROUP_CORRUPT_EVENT(name) \
DEFINE_EVENT(xfs_group_corrupt_class, name, \
TP_PROTO(const struct xfs_group *xg, unsigned int flags), \
TP_ARGS(xg, flags))
DEFINE_GROUP_CORRUPT_EVENT(xfs_group_mark_sick);
DEFINE_GROUP_CORRUPT_EVENT(xfs_group_mark_corrupt);
DEFINE_GROUP_CORRUPT_EVENT(xfs_group_mark_healthy);
DEFINE_GROUP_CORRUPT_EVENT(xfs_group_unfixed_corruption);
DECLARE_EVENT_CLASS(xfs_inode_corrupt_class,
TP_PROTO(struct xfs_inode *ip, unsigned int flags),
@ -4245,8 +4293,8 @@ TRACE_EVENT(xfs_iwalk_ag_rec,
__field(uint64_t, freemask)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->dev = pag_mount(pag)->m_super->s_dev;
__entry->agno = pag_agno(pag);
__entry->startino = irec->ir_startino;
__entry->freemask = irec->ir_free;
),
@ -4308,7 +4356,7 @@ TRACE_EVENT(xfs_btree_commit_afakeroot,
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
__assign_str(name);
__entry->agno = cur->bc_ag.pag->pag_agno;
__entry->agno = cur->bc_group->xg_gno;
__entry->agbno = cur->bc_ag.afake->af_root;
__entry->levels = cur->bc_ag.afake->af_levels;
__entry->blocks = cur->bc_ag.afake->af_blocks;
@ -4423,7 +4471,7 @@ TRACE_EVENT(xfs_btree_bload_block,
__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsb);
__entry->agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsb);
} else {
__entry->agno = cur->bc_ag.pag->pag_agno;
__entry->agno = cur->bc_group->xg_gno;
__entry->agbno = be32_to_cpu(ptr->s);
}
__entry->nr_records = nr_records;
@ -4648,35 +4696,39 @@ TRACE_EVENT(xfs_force_shutdown,
);
#ifdef CONFIG_XFS_DRAIN_INTENTS
DECLARE_EVENT_CLASS(xfs_perag_intents_class,
TP_PROTO(const struct xfs_perag *pag, void *caller_ip),
TP_ARGS(pag, caller_ip),
DECLARE_EVENT_CLASS(xfs_group_intents_class,
TP_PROTO(const struct xfs_group *xg, void *caller_ip),
TP_ARGS(xg, caller_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(enum xfs_group_type, type)
__field(uint32_t, index)
__field(long, nr_intents)
__field(void *, caller_ip)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->nr_intents = atomic_read(&pag->pag_intents_drain.dr_count);
__entry->dev = xg->xg_mount->m_super->s_dev;
__entry->type = xg->xg_type;
__entry->index = xg->xg_gno;
__entry->nr_intents =
atomic_read(&xg->xg_intents_drain.dr_count);
__entry->caller_ip = caller_ip;
),
TP_printk("dev %d:%d agno 0x%x intents %ld caller %pS",
TP_printk("dev %d:%d %sno 0x%x intents %ld caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->index,
__entry->nr_intents,
__entry->caller_ip)
);
#define DEFINE_PERAG_INTENTS_EVENT(name) \
DEFINE_EVENT(xfs_perag_intents_class, name, \
TP_PROTO(const struct xfs_perag *pag, void *caller_ip), \
TP_ARGS(pag, caller_ip))
DEFINE_PERAG_INTENTS_EVENT(xfs_perag_intent_hold);
DEFINE_PERAG_INTENTS_EVENT(xfs_perag_intent_rele);
DEFINE_PERAG_INTENTS_EVENT(xfs_perag_wait_intents);
#define DEFINE_GROUP_INTENTS_EVENT(name) \
DEFINE_EVENT(xfs_group_intents_class, name, \
TP_PROTO(const struct xfs_group *xg, void *caller_ip), \
TP_ARGS(xg, caller_ip))
DEFINE_GROUP_INTENTS_EVENT(xfs_group_intent_hold);
DEFINE_GROUP_INTENTS_EVENT(xfs_group_intent_rele);
DEFINE_GROUP_INTENTS_EVENT(xfs_group_wait_intents);
#endif /* CONFIG_XFS_DRAIN_INTENTS */