xfs: make the RT allocator rtgroup aware

Make the allocator rtgroup aware by either picking a specific group if
there is a hint, or loop over all groups otherwise.  A simple rotor is
provided to pick the placement for initial allocations.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
This commit is contained in:
Christoph Hellwig 2024-11-03 20:19:29 -08:00 committed by Darrick J. Wong
parent b91afef724
commit d162491c54
4 changed files with 105 additions and 13 deletions

View File

@ -3151,8 +3151,17 @@ xfs_bmap_adjacent_valid(
struct xfs_mount *mp = ap->ip->i_mount;
if (XFS_IS_REALTIME_INODE(ap->ip) &&
(ap->datatype & XFS_ALLOC_USERDATA))
return x < mp->m_sb.sb_rblocks;
(ap->datatype & XFS_ALLOC_USERDATA)) {
if (x >= mp->m_sb.sb_rblocks)
return false;
if (!xfs_has_rtgroups(mp))
return true;
return xfs_rtb_to_rgno(mp, x) == xfs_rtb_to_rgno(mp, y) &&
xfs_rtb_to_rgno(mp, x) < mp->m_sb.sb_rgcount &&
xfs_rtb_to_rtx(mp, x) < mp->m_sb.sb_rgextents;
}
return XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) &&
XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount &&

View File

@ -1084,11 +1084,13 @@ xfs_rtfree_extent(
* Mark more blocks free in the superblock.
*/
xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS, (long)len);
/*
* If we've now freed all the blocks, reset the file sequence
* number to 0.
* number to 0 for pre-RTG file systems.
*/
if (tp->t_frextents_delta + mp->m_sb.sb_frextents ==
if (!xfs_has_rtgroups(mp) &&
tp->t_frextents_delta + mp->m_sb.sb_frextents ==
mp->m_sb.sb_rextents) {
if (!(rbmip->i_diflags & XFS_DIFLAG_NEWRTBM))
rbmip->i_diflags |= XFS_DIFLAG_NEWRTBM;

View File

@ -255,6 +255,7 @@ typedef struct xfs_mount {
#endif
xfs_agnumber_t m_agfrotor; /* last ag where space found */
atomic_t m_agirotor; /* last ag dir inode alloced */
atomic_t m_rtgrotor; /* last rtgroup rtpicked */
/* Memory shrinker to throttle and reprioritize inodegc */
struct shrinker *m_inodegc_shrinker;

View File

@ -1662,8 +1662,9 @@ xfs_rtalloc_align_minmax(
}
static int
xfs_rtallocate(
xfs_rtallocate_rtg(
struct xfs_trans *tp,
xfs_rgnumber_t rgno,
xfs_rtblock_t bno_hint,
xfs_rtxlen_t minlen,
xfs_rtxlen_t maxlen,
@ -1683,16 +1684,33 @@ xfs_rtallocate(
xfs_rtxlen_t len = 0;
int error = 0;
args.rtg = xfs_rtgroup_grab(args.mp, 0);
args.rtg = xfs_rtgroup_grab(args.mp, rgno);
if (!args.rtg)
return -ENOSPC;
/*
* Lock out modifications to both the RT bitmap and summary inodes.
* We need to lock out modifications to both the RT bitmap and summary
* inodes for finding free space in xfs_rtallocate_extent_{near,size}
* and join the bitmap and summary inodes for the actual allocation
* down in xfs_rtallocate_range.
*
* For RTG-enabled file systems we don't want to join the inodes to the
* transaction until we are committed to allocate from this RTG so that
* only one inode of each type is locked at a time.
*
* But for pre-RTG file systems we already need to join the bitmap
* inode to the transaction for xfs_rtpick_extent, which bumps the
* sequence number in it, so we'll have to join the inode to the
* transaction early here.
*
* This is all a bit messy, but at least the mess is contained in
* this function.
*/
if (!*rtlocked) {
xfs_rtgroup_lock(args.rtg, XFS_RTGLOCK_BITMAP);
xfs_rtgroup_trans_join(tp, args.rtg, XFS_RTGLOCK_BITMAP);
if (!xfs_has_rtgroups(args.mp))
xfs_rtgroup_trans_join(tp, args.rtg,
XFS_RTGLOCK_BITMAP);
*rtlocked = true;
}
@ -1702,7 +1720,7 @@ xfs_rtallocate(
*/
if (bno_hint)
start = xfs_rtb_to_rtx(args.mp, bno_hint);
else if (initial_user_data)
else if (!xfs_has_rtgroups(args.mp) && initial_user_data)
start = xfs_rtpick_extent(args.rtg, tp, maxlen);
if (start) {
@ -1723,8 +1741,16 @@ xfs_rtallocate(
prod, &rtx);
}
if (error)
if (error) {
if (xfs_has_rtgroups(args.mp)) {
xfs_rtgroup_unlock(args.rtg, XFS_RTGLOCK_BITMAP);
*rtlocked = false;
}
goto out_release;
}
if (xfs_has_rtgroups(args.mp))
xfs_rtgroup_trans_join(tp, args.rtg, XFS_RTGLOCK_BITMAP);
error = xfs_rtallocate_range(&args, rtx, len);
if (error)
@ -1742,6 +1768,53 @@ out_release:
return error;
}
/*
 * Try to allocate a realtime extent, iterating over the realtime groups.
 *
 * The starting group is taken from bno_hint when the caller supplied one,
 * otherwise from the m_rtgrotor rotor so that successive initial
 * allocations are spread across groups.  Each group is tried in turn
 * until one succeeds (or fails with something other than -ENOSPC), or
 * all groups have been visited.
 */
static int
xfs_rtallocate_rtgs(
	struct xfs_trans	*tp,
	xfs_fsblock_t		bno_hint,
	xfs_rtxlen_t		minlen,
	xfs_rtxlen_t		maxlen,
	xfs_rtxlen_t		prod,
	bool			wasdel,
	bool			initial_user_data,
	xfs_rtblock_t		*bno,
	xfs_extlen_t		*blen)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_rgnumber_t		start_rgno, rgno;
	int			error;

	/*
	 * For now this just blindly iterates over the RTGs for an initial
	 * allocation.  We could try to keep an in-memory rtg_longest member
	 * to avoid the locking when just looking for big enough free space,
	 * but for now this keeps things simple.
	 */
	if (bno_hint != NULLFSBLOCK)
		start_rgno = xfs_rtb_to_rgno(mp, bno_hint);
	else
		start_rgno = (atomic_inc_return(&mp->m_rtgrotor) - 1) %
				mp->m_sb.sb_rgcount;

	rgno = start_rgno;
	do {
		bool		rtlocked = false;

		error = xfs_rtallocate_rtg(tp, rgno, bno_hint, minlen, maxlen,
				prod, wasdel, initial_user_data, &rtlocked,
				bno, blen);
		if (error != -ENOSPC)
			return error;
		/* An -ENOSPC return must have dropped the group locks again. */
		ASSERT(!rtlocked);

		/* Wrap around to group 0 after the last group. */
		if (++rgno == mp->m_sb.sb_rgcount)
			rgno = 0;
		/* The block hint only applies to the group it points into. */
		bno_hint = NULLFSBLOCK;
	} while (rgno != start_rgno);

	return -ENOSPC;
}
static int
xfs_rtallocate_align(
struct xfs_bmalloca *ap,
@ -1836,9 +1909,16 @@ retry:
if (xfs_bmap_adjacent(ap))
bno_hint = ap->blkno;
error = xfs_rtallocate(ap->tp, bno_hint, raminlen, ralen, prod,
ap->wasdel, initial_user_data, &rtlocked,
&ap->blkno, &ap->length);
if (xfs_has_rtgroups(ap->ip->i_mount)) {
error = xfs_rtallocate_rtgs(ap->tp, bno_hint, raminlen, ralen,
prod, ap->wasdel, initial_user_data,
&ap->blkno, &ap->length);
} else {
error = xfs_rtallocate_rtg(ap->tp, 0, bno_hint, raminlen, ralen,
prod, ap->wasdel, initial_user_data,
&rtlocked, &ap->blkno, &ap->length);
}
if (error == -ENOSPC) {
if (!noalign) {
/*