xfs: create incore rt allocation groups [v5.5 04/10]

Add in-memory data structures for sharding the realtime volume into
independent allocation groups. For existing filesystems, the entire rt
volume is modelled as having a single large group, with (potentially) a
number of rt extents exceeding 2^32 blocks, though these are not likely
to exist because the codebase has been a bit broken for decades. The
next series fills in the ondisk format and other supporting structures.
With a bit of luck, this should all go splendidly.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>

Merge tag 'incore-rtgroups-6.13_2024-11-05' of
https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux into staging-merge

commit 6b3582aca3
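The sharding itself is simple arithmetic: every realtime group covers
sb_rgextents rt extents and the last group absorbs whatever remains, while
pre-rtgroups filesystems are treated as one big group (the superblock
decoding below sets sb_rgcount = 1). A standalone sketch of that math
follows; it is not kernel code, and all the geometry numbers are made up:

/*
 * Standalone sketch (not kernel code): how many rt extents land in each
 * realtime group, mirroring the __xfs_rtgroup_extents() logic below.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t sb_rextents = 1000000;	/* rt extents in the whole volume (made up) */
	uint32_t sb_rgextents = 262144;	/* rt extents per group (made up) */
	uint32_t sb_rgcount =
		(sb_rextents + sb_rgextents - 1) / sb_rgextents;

	for (uint32_t rgno = 0; rgno < sb_rgcount; rgno++) {
		uint64_t extents;

		/* last group gets whatever is left over */
		if (rgno == sb_rgcount - 1)
			extents = sb_rextents - (uint64_t)rgno * sb_rgextents;
		else
			extents = sb_rgextents;
		printf("rtgroup %u: %llu rt extents\n", rgno,
				(unsigned long long)extents);
	}
	return 0;
}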
@@ -61,6 +61,7 @@ xfs-y += $(addprefix libxfs/, \
 # xfs_rtbitmap is shared with libxfs
 xfs-$(CONFIG_XFS_RT)		+= $(addprefix libxfs/, \
 				   xfs_rtbitmap.o \
+				   xfs_rtgroup.o \
 				   )
 
 # highlevel code
@@ -4094,7 +4094,7 @@ retry:
 
 	fdblocks = indlen;
 	if (XFS_IS_REALTIME_INODE(ip)) {
-		error = xfs_dec_frextents(mp, xfs_rtb_to_rtx(mp, alen));
+		error = xfs_dec_frextents(mp, xfs_blen_to_rtbxlen(mp, alen));
 		if (error)
 			goto out_unreserve_quota;
 	} else {
@@ -4129,7 +4129,7 @@ retry:
 
 out_unreserve_frextents:
 	if (XFS_IS_REALTIME_INODE(ip))
-		xfs_add_frextents(mp, xfs_rtb_to_rtx(mp, alen));
+		xfs_add_frextents(mp, xfs_blen_to_rtbxlen(mp, alen));
 out_unreserve_quota:
 	if (XFS_IS_QUOTA_ON(mp))
 		xfs_quota_unreserve_blkres(ip, alen);
@@ -5037,7 +5037,7 @@ xfs_bmap_del_extent_delay(
 	fdblocks = da_diff;
 
 	if (isrt)
-		xfs_add_frextents(mp, xfs_rtb_to_rtx(mp, del->br_blockcount));
+		xfs_add_frextents(mp, xfs_blen_to_rtbxlen(mp, del->br_blockcount));
 	else
 		fdblocks += del->br_blockcount;
 
@@ -5116,6 +5116,34 @@ xfs_bmap_del_extent_cow(
 	ip->i_delayed_blks -= del->br_blockcount;
 }
 
+static int
+xfs_bmap_free_rtblocks(
+	struct xfs_trans	*tp,
+	struct xfs_bmbt_irec	*del)
+{
+	struct xfs_rtgroup	*rtg;
+	int			error;
+
+	rtg = xfs_rtgroup_grab(tp->t_mountp, 0);
+	if (!rtg)
+		return -EIO;
+
+	/*
+	 * Ensure the bitmap and summary inodes are locked and joined to the
+	 * transaction before modifying them.
+	 */
+	if (!(tp->t_flags & XFS_TRANS_RTBITMAP_LOCKED)) {
+		tp->t_flags |= XFS_TRANS_RTBITMAP_LOCKED;
+		xfs_rtgroup_lock(rtg, XFS_RTGLOCK_BITMAP);
+		xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_BITMAP);
+	}
+
+	error = xfs_rtfree_blocks(tp, rtg, del->br_startblock,
+			del->br_blockcount);
+	xfs_rtgroup_rele(rtg);
+	return error;
+}
+
 /*
  * Called by xfs_bmapi to update file extent records and the btree
  * after removing space.
@@ -5331,17 +5359,7 @@ xfs_bmap_del_extent_real(
 	if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
 		xfs_refcount_decrease_extent(tp, del);
 	} else if (xfs_ifork_is_realtime(ip, whichfork)) {
-		/*
-		 * Ensure the bitmap and summary inodes are locked
-		 * and joined to the transaction before modifying them.
-		 */
-		if (!(tp->t_flags & XFS_TRANS_RTBITMAP_LOCKED)) {
-			tp->t_flags |= XFS_TRANS_RTBITMAP_LOCKED;
-			xfs_rtbitmap_lock(mp);
-			xfs_rtbitmap_trans_join(tp);
-		}
-		error = xfs_rtfree_blocks(tp, del->br_startblock,
-				del->br_blockcount);
+		error = xfs_bmap_free_rtblocks(tp, del);
 	} else {
 		unsigned int	efi_flags = 0;
 
@@ -176,6 +176,9 @@ typedef struct xfs_sb {
 
 	xfs_ino_t	sb_metadirino;	/* metadata directory tree root */
 
+	xfs_rgnumber_t	sb_rgcount;	/* number of realtime groups */
+	xfs_rtxlen_t	sb_rgextents;	/* size of a realtime group in rtx */
+
 	/* must be padded to 64 bit alignment */
 } xfs_sb_t;
 
@@ -20,6 +20,7 @@
 #include "xfs_error.h"
 #include "xfs_rtbitmap.h"
 #include "xfs_health.h"
+#include "xfs_sb.h"
 
 /*
  * Realtime allocator bitmap functions shared with userspace.
@@ -90,12 +91,12 @@ xfs_rtbuf_get(
 	if (issum) {
 		cbpp = &args->sumbp;
 		coffp = &args->sumoff;
-		ip = mp->m_rsumip;
+		ip = args->rtg->rtg_inodes[XFS_RTGI_SUMMARY];
 		type = XFS_BLFT_RTSUMMARY_BUF;
 	} else {
 		cbpp = &args->rbmbp;
 		coffp = &args->rbmoff;
-		ip = mp->m_rbmip;
+		ip = args->rtg->rtg_inodes[XFS_RTGI_BITMAP];
 		type = XFS_BLFT_RTBITMAP_BUF;
 	}
 
@@ -503,6 +504,7 @@ xfs_rtmodify_summary(
 {
 	struct xfs_mount	*mp = args->mp;
 	xfs_rtsumoff_t		so = xfs_rtsumoffs(mp, log, bbno);
+	uint8_t			*rsum_cache = args->rtg->rtg_rsum_cache;
 	unsigned int		infoword;
 	xfs_suminfo_t		val;
 	int			error;
@@ -514,11 +516,11 @@ xfs_rtmodify_summary(
 	infoword = xfs_rtsumoffs_to_infoword(mp, so);
 	val = xfs_suminfo_add(args, infoword, delta);
 
-	if (mp->m_rsum_cache) {
-		if (val == 0 && log + 1 == mp->m_rsum_cache[bbno])
-			mp->m_rsum_cache[bbno] = log;
-		if (val != 0 && log >= mp->m_rsum_cache[bbno])
-			mp->m_rsum_cache[bbno] = log + 1;
+	if (rsum_cache) {
+		if (val == 0 && log + 1 == rsum_cache[bbno])
+			rsum_cache[bbno] = log;
+		if (val != 0 && log >= rsum_cache[bbno])
+			rsum_cache[bbno] = log + 1;
 	}
 
 	xfs_trans_log_rtsummary(args, infoword);
@@ -737,7 +739,7 @@ xfs_rtfree_range(
 	/*
	 * Find the next allocated block (end of allocated extent).
	 */
-	error = xfs_rtfind_forw(args, end, mp->m_sb.sb_rextents - 1,
+	error = xfs_rtfind_forw(args, end, args->rtg->rtg_extents - 1,
 			&postblock);
 	if (error)
 		return error;
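The rsum_cache handling above follows a simple rule: the cached byte for a
bitmap block is one greater than the highest summary level known to be
nonzero for that block. A standalone sketch of the update rule (not kernel
code; the sample values are invented):

/*
 * Standalone sketch of the rsum_cache update rule in
 * xfs_rtmodify_summary(): cache[bbno] is one past the highest summary
 * level known to be nonzero for bitmap block bbno.
 */
#include <stdio.h>
#include <stdint.h>

static void update_cache(uint8_t *cache, int bbno, int log, uint32_t val)
{
	/* the count at this level just hit zero: lower the cached bound */
	if (val == 0 && log + 1 == cache[bbno])
		cache[bbno] = log;
	/* the count is nonzero at or above the bound: raise the bound */
	if (val != 0 && log >= cache[bbno])
		cache[bbno] = log + 1;
}

int main(void)
{
	uint8_t cache[1] = { 3 };	/* levels 0..2 may be nonzero */

	update_cache(cache, 0, 4, 7);	/* level 4 becomes nonzero */
	printf("after raise: %u\n", cache[0]);	/* prints 5 */
	update_cache(cache, 0, 4, 0);	/* level 4 drops back to zero */
	printf("after lower: %u\n", cache[0]);	/* prints 4 */
	return 0;
}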
@@ -961,19 +963,22 @@ xfs_rtcheck_alloc_range(
 int
 xfs_rtfree_extent(
 	struct xfs_trans	*tp,	/* transaction pointer */
+	struct xfs_rtgroup	*rtg,
 	xfs_rtxnum_t		start,	/* starting rtext number to free */
 	xfs_rtxlen_t		len)	/* length of extent freed */
 {
 	struct xfs_mount	*mp = tp->t_mountp;
+	struct xfs_inode	*rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP];
 	struct xfs_rtalloc_args	args = {
 		.mp		= mp,
 		.tp		= tp,
+		.rtg		= rtg,
 	};
 	int			error;
 	struct timespec64	atime;
 
-	ASSERT(mp->m_rbmip->i_itemp != NULL);
-	xfs_assert_ilocked(mp->m_rbmip, XFS_ILOCK_EXCL);
+	ASSERT(rbmip->i_itemp != NULL);
+	xfs_assert_ilocked(rbmip, XFS_ILOCK_EXCL);
 
 	error = xfs_rtcheck_alloc_range(&args, start, len);
 	if (error)
@@ -996,13 +1001,13 @@ xfs_rtfree_extent(
 	 */
 	if (tp->t_frextents_delta + mp->m_sb.sb_frextents ==
 	    mp->m_sb.sb_rextents) {
-		if (!(mp->m_rbmip->i_diflags & XFS_DIFLAG_NEWRTBM))
-			mp->m_rbmip->i_diflags |= XFS_DIFLAG_NEWRTBM;
+		if (!(rbmip->i_diflags & XFS_DIFLAG_NEWRTBM))
+			rbmip->i_diflags |= XFS_DIFLAG_NEWRTBM;
 
-		atime = inode_get_atime(VFS_I(mp->m_rbmip));
+		atime = inode_get_atime(VFS_I(rbmip));
 		atime.tv_sec = 0;
-		inode_set_atime_to_ts(VFS_I(mp->m_rbmip), atime);
-		xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
+		inode_set_atime_to_ts(VFS_I(rbmip), atime);
+		xfs_trans_log_inode(tp, rbmip, XFS_ILOG_CORE);
 	}
 	error = 0;
 out:
@@ -1018,6 +1023,7 @@ out:
 int
 xfs_rtfree_blocks(
 	struct xfs_trans	*tp,
+	struct xfs_rtgroup	*rtg,
 	xfs_fsblock_t		rtbno,
 	xfs_filblks_t		rtlen)
 {
@@ -1038,21 +1044,23 @@ xfs_rtfree_blocks(
 		return -EIO;
 	}
 
-	return xfs_rtfree_extent(tp, xfs_rtb_to_rtx(mp, rtbno),
-			xfs_rtb_to_rtx(mp, rtlen));
+	return xfs_rtfree_extent(tp, rtg, xfs_rtb_to_rtx(mp, rtbno),
+			xfs_extlen_to_rtxlen(mp, rtlen));
 }
 
 /* Find all the free records within a given range. */
 int
 xfs_rtalloc_query_range(
-	struct xfs_mount	*mp,
+	struct xfs_rtgroup	*rtg,
 	struct xfs_trans	*tp,
 	xfs_rtxnum_t		start,
 	xfs_rtxnum_t		end,
 	xfs_rtalloc_query_range_fn fn,
 	void			*priv)
 {
+	struct xfs_mount	*mp = rtg_mount(rtg);
 	struct xfs_rtalloc_args	args = {
+		.rtg		= rtg,
 		.mp		= mp,
 		.tp		= tp,
 	};
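The xfs_rtfree_blocks() call above converts both arguments out of rt block
units before freeing: the start becomes an rt extent number and the length
an rt extent count, dividing by sb_rextsize or shifting when it is a power
of two. A standalone sketch of those conversions (not kernel code; it
ignores the rtgroup start offset, i.e. assumes a single-group filesystem,
and the sample geometry is made up):

/* Standalone sketch of the block-to-extent unit conversions. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t sb_rextsize = 8;	/* fs blocks per rt extent (made up) */
	int rtxblklog = 3;		/* log2(rextsize), or -1 if not a power of 2 */
	uint64_t rtbno = 4096;		/* start, in rt blocks (made up) */
	uint64_t rtlen = 64;		/* length, in rt blocks (made up) */

	/* like xfs_rtb_to_rtx(): shift if rextsize is a power of two */
	uint64_t start_rtx = (rtxblklog >= 0) ?
			(rtbno >> rtxblklog) : (rtbno / sb_rextsize);
	/* like xfs_extlen_to_rtxlen(): plain division */
	uint64_t len_rtx = rtlen / sb_rextsize;

	printf("free %llu rt extents starting at rt extent %llu\n",
			(unsigned long long)len_rtx,
			(unsigned long long)start_rtx);
	return 0;
}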
@@ -1060,10 +1068,10 @@ xfs_rtalloc_query_range(
 
 	if (start > end)
 		return -EINVAL;
-	if (start == end || start >= mp->m_sb.sb_rextents)
+	if (start == end || start >= rtg->rtg_extents)
 		return 0;
 
-	end = min(end, mp->m_sb.sb_rextents - 1);
+	end = min(end, rtg->rtg_extents - 1);
 
 	/* Iterate the bitmap, looking for discrepancies. */
 	while (start <= end) {
@@ -1086,7 +1094,7 @@ xfs_rtalloc_query_range(
 		rec.ar_startext = start;
 		rec.ar_extcount = rtend - start + 1;
 
-		error = fn(mp, tp, &rec, priv);
+		error = fn(rtg, tp, &rec, priv);
 		if (error)
 			break;
 	}
@@ -1101,26 +1109,27 @@ xfs_rtalloc_query_range(
 /* Find all the free records. */
 int
 xfs_rtalloc_query_all(
-	struct xfs_mount	*mp,
+	struct xfs_rtgroup	*rtg,
 	struct xfs_trans	*tp,
 	xfs_rtalloc_query_range_fn fn,
 	void			*priv)
 {
-	return xfs_rtalloc_query_range(mp, tp, 0, mp->m_sb.sb_rextents - 1, fn,
+	return xfs_rtalloc_query_range(rtg, tp, 0, rtg->rtg_extents - 1, fn,
 			priv);
 }
 
 /* Is the given extent all free? */
 int
 xfs_rtalloc_extent_is_free(
-	struct xfs_mount	*mp,
+	struct xfs_rtgroup	*rtg,
 	struct xfs_trans	*tp,
 	xfs_rtxnum_t		start,
 	xfs_rtxlen_t		len,
 	bool			*is_free)
 {
 	struct xfs_rtalloc_args	args = {
-		.mp		= mp,
+		.mp		= rtg_mount(rtg),
+		.rtg		= rtg,
 		.tp		= tp,
 	};
 	xfs_rtxnum_t		end;
@@ -1141,85 +1150,40 @@ xfs_rtalloc_extent_is_free(
  * extents.
  */
 xfs_filblks_t
-xfs_rtbitmap_blockcount(
+xfs_rtbitmap_blockcount_len(
 	struct xfs_mount	*mp,
 	xfs_rtbxlen_t		rtextents)
 {
 	return howmany_64(rtextents, NBBY * mp->m_sb.sb_blocksize);
 }
 
-/* Compute the number of rtsummary blocks needed to track the given rt space. */
+/*
+ * Compute the number of rtbitmap blocks used for a given file system.
+ */
+xfs_filblks_t
+xfs_rtbitmap_blockcount(
+	struct xfs_mount	*mp)
+{
+	return xfs_rtbitmap_blockcount_len(mp, mp->m_sb.sb_rextents);
+}
+
+/*
+ * Compute the geometry of the rtsummary file needed to track the given rt
+ * space.
+ */
 xfs_filblks_t
 xfs_rtsummary_blockcount(
 	struct xfs_mount	*mp,
-	unsigned int		rsumlevels,
-	xfs_extlen_t		rbmblocks)
+	unsigned int		*rsumlevels)
 {
 	unsigned long long	rsumwords;
 
-	rsumwords = (unsigned long long)rsumlevels * rbmblocks;
+	*rsumlevels = xfs_compute_rextslog(mp->m_sb.sb_rextents) + 1;
+
+	rsumwords = xfs_rtbitmap_blockcount(mp) * (*rsumlevels);
 	return XFS_B_TO_FSB(mp, rsumwords << XFS_WORDLOG);
 }
 
-/* Lock both realtime free space metadata inodes for a freespace update. */
-void
-xfs_rtbitmap_lock(
-	struct xfs_mount	*mp)
-{
-	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL | XFS_ILOCK_RTBITMAP);
-	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL | XFS_ILOCK_RTSUM);
-}
-
-/*
- * Join both realtime free space metadata inodes to the transaction. The
- * ILOCKs will be released on transaction commit.
- */
-void
-xfs_rtbitmap_trans_join(
-	struct xfs_trans	*tp)
-{
-	xfs_trans_ijoin(tp, tp->t_mountp->m_rbmip, XFS_ILOCK_EXCL);
-	xfs_trans_ijoin(tp, tp->t_mountp->m_rsumip, XFS_ILOCK_EXCL);
-}
-
-/* Unlock both realtime free space metadata inodes after a freespace update. */
-void
-xfs_rtbitmap_unlock(
-	struct xfs_mount	*mp)
-{
-	xfs_iunlock(mp->m_rsumip, XFS_ILOCK_EXCL | XFS_ILOCK_RTSUM);
-	xfs_iunlock(mp->m_rbmip, XFS_ILOCK_EXCL | XFS_ILOCK_RTBITMAP);
-}
-
-/*
- * Lock the realtime free space metadata inodes for a freespace scan.  Callers
- * must walk metadata blocks in order of increasing file offset.
- */
-void
-xfs_rtbitmap_lock_shared(
-	struct xfs_mount	*mp,
-	unsigned int		rbmlock_flags)
-{
-	if (rbmlock_flags & XFS_RBMLOCK_BITMAP)
-		xfs_ilock(mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
-
-	if (rbmlock_flags & XFS_RBMLOCK_SUMMARY)
-		xfs_ilock(mp->m_rsumip, XFS_ILOCK_SHARED | XFS_ILOCK_RTSUM);
-}
-
-/* Unlock the realtime free space metadata inodes after a freespace scan. */
-void
-xfs_rtbitmap_unlock_shared(
-	struct xfs_mount	*mp,
-	unsigned int		rbmlock_flags)
-{
-	if (rbmlock_flags & XFS_RBMLOCK_SUMMARY)
-		xfs_iunlock(mp->m_rsumip, XFS_ILOCK_SHARED | XFS_ILOCK_RTSUM);
-
-	if (rbmlock_flags & XFS_RBMLOCK_BITMAP)
-		xfs_iunlock(mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
-}
-
 static int
 xfs_rtfile_alloc_blocks(
 	struct xfs_inode	*ip,
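Worked through with sample numbers, the sizing above goes: one bitmap bit
per rt extent gives the bitmap block count, the summary has
xfs_compute_rextslog(rextents) + 1 levels, and the summary file needs one
32-bit word per level per bitmap block. A standalone sketch (not kernel
code; it assumes xfs_compute_rextslog() is effectively floor(log2) and uses
made-up geometry):

#include <stdio.h>
#include <stdint.h>

static uint64_t howmany64(uint64_t x, uint64_t y)
{
	return (x + y - 1) / y;
}

static unsigned int highbit64(uint64_t v)	/* floor(log2(v)), v > 0 */
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint64_t sb_rextents = 1000000;	/* made up */
	uint32_t blocksize = 4096;	/* made up */

	/* like xfs_rtbitmap_blockcount_len(): one bit per rt extent */
	uint64_t rbmblocks = howmany64(sb_rextents, 8ULL * blocksize);

	/* like xfs_rtsummary_blockcount(): levels derived from rextents */
	unsigned int rsumlevels = highbit64(sb_rextents) + 1;
	uint64_t rsumwords = rbmblocks * rsumlevels;
	uint64_t rsumblocks = howmany64(rsumwords << 2, blocksize);

	printf("bitmap blocks=%llu, summary levels=%u, summary blocks=%llu\n",
			(unsigned long long)rbmblocks, rsumlevels,
			(unsigned long long)rsumblocks);
	return 0;
}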
@@ -1260,21 +1224,25 @@ out_trans_cancel:
 /* Get a buffer for the block. */
 static int
 xfs_rtfile_initialize_block(
-	struct xfs_inode	*ip,
+	struct xfs_rtgroup	*rtg,
+	enum xfs_rtg_inodes	type,
 	xfs_fsblock_t		fsbno,
 	void			*data)
 {
-	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_mount	*mp = rtg_mount(rtg);
+	struct xfs_inode	*ip = rtg->rtg_inodes[type];
 	struct xfs_trans	*tp;
 	struct xfs_buf		*bp;
 	const size_t		copylen = mp->m_blockwsize << XFS_WORDLOG;
 	enum xfs_blft		buf_type;
 	int			error;
 
-	if (ip == mp->m_rsumip)
+	if (type == XFS_RTGI_BITMAP)
+		buf_type = XFS_BLFT_RTBITMAP_BUF;
+	else if (type == XFS_RTGI_SUMMARY)
 		buf_type = XFS_BLFT_RTSUMMARY_BUF;
 	else
-		buf_type = XFS_BLFT_RTBITMAP_BUF;
+		return -EINVAL;
 
 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growrtzero, 0, 0, 0, &tp);
 	if (error)
@@ -1306,12 +1274,13 @@ xfs_rtfile_initialize_block(
  */
 int
 xfs_rtfile_initialize_blocks(
-	struct xfs_inode	*ip,		/* inode (bitmap/summary) */
+	struct xfs_rtgroup	*rtg,
+	enum xfs_rtg_inodes	type,
 	xfs_fileoff_t		offset_fsb,	/* offset to start from */
 	xfs_fileoff_t		end_fsb,	/* offset to allocate to */
 	void			*data)		/* data to fill the blocks */
 {
-	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_mount	*mp = rtg_mount(rtg);
 	const size_t		copylen = mp->m_blockwsize << XFS_WORDLOG;
 
 	while (offset_fsb < end_fsb) {
@@ -1319,8 +1288,8 @@ xfs_rtfile_initialize_blocks(
 		xfs_filblks_t	i;
 		int		error;
 
-		error = xfs_rtfile_alloc_blocks(ip, offset_fsb,
-				end_fsb - offset_fsb, &map);
+		error = xfs_rtfile_alloc_blocks(rtg->rtg_inodes[type],
+				offset_fsb, end_fsb - offset_fsb, &map);
 		if (error)
 			return error;
 
@@ -1330,7 +1299,7 @@ xfs_rtfile_initialize_blocks(
 		 * Do this one block per transaction, to keep it simple.
 		 */
 		for (i = 0; i < map.br_blockcount; i++) {
-			error = xfs_rtfile_initialize_block(ip,
+			error = xfs_rtfile_initialize_block(rtg, type,
 					map.br_startblock + i, data);
 			if (error)
 				return error;
@@ -1343,3 +1312,35 @@ xfs_rtfile_initialize_blocks(
 
 	return 0;
 }
+
+int
+xfs_rtbitmap_create(
+	struct xfs_rtgroup	*rtg,
+	struct xfs_inode	*ip,
+	struct xfs_trans	*tp,
+	bool			init)
+{
+	struct xfs_mount	*mp = rtg_mount(rtg);
+
+	ip->i_disk_size = mp->m_sb.sb_rbmblocks * mp->m_sb.sb_blocksize;
+	if (init && !xfs_has_rtgroups(mp)) {
+		ip->i_diflags |= XFS_DIFLAG_NEWRTBM;
+		inode_set_atime(VFS_I(ip), 0, 0);
+	}
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+	return 0;
+}
+
+int
+xfs_rtsummary_create(
+	struct xfs_rtgroup	*rtg,
+	struct xfs_inode	*ip,
+	struct xfs_trans	*tp,
+	bool			init)
+{
+	struct xfs_mount	*mp = rtg_mount(rtg);
+
+	ip->i_disk_size = mp->m_rsumblocks * mp->m_sb.sb_blocksize;
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+	return 0;
+}
@@ -6,7 +6,10 @@
 #ifndef __XFS_RTBITMAP_H__
 #define __XFS_RTBITMAP_H__
 
+#include "xfs_rtgroup.h"
+
 struct xfs_rtalloc_args {
+	struct xfs_rtgroup	*rtg;
 	struct xfs_mount	*mp;
 	struct xfs_trans	*tp;
 
@@ -19,13 +22,37 @@ struct xfs_rtalloc_args {
 
 static inline xfs_rtblock_t
 xfs_rtx_to_rtb(
-	struct xfs_mount	*mp,
+	struct xfs_rtgroup	*rtg,
 	xfs_rtxnum_t		rtx)
 {
-	if (mp->m_rtxblklog >= 0)
-		return rtx << mp->m_rtxblklog;
+	struct xfs_mount	*mp = rtg_mount(rtg);
+	xfs_rtblock_t		start = xfs_rgno_start_rtb(mp, rtg_rgno(rtg));
 
-	return rtx * mp->m_sb.sb_rextsize;
+	if (mp->m_rtxblklog >= 0)
+		return start + (rtx << mp->m_rtxblklog);
+	return start + (rtx * mp->m_sb.sb_rextsize);
 }
 
+/* Convert an rgbno into an rt extent number. */
+static inline xfs_rtxnum_t
+xfs_rgbno_to_rtx(
+	struct xfs_mount	*mp,
+	xfs_rgblock_t		rgbno)
+{
+	if (likely(mp->m_rtxblklog >= 0))
+		return rgbno >> mp->m_rtxblklog;
+	return rgbno / mp->m_sb.sb_rextsize;
+}
+
+static inline uint64_t
+xfs_rtbxlen_to_blen(
+	struct xfs_mount	*mp,
+	xfs_rtbxlen_t		rtbxlen)
+{
+	if (mp->m_rtxblklog >= 0)
+		return rtbxlen << mp->m_rtxblklog;
+
+	return rtbxlen * mp->m_sb.sb_rextsize;
+}
+
 static inline xfs_extlen_t
@@ -62,16 +89,29 @@ xfs_extlen_to_rtxlen(
 	return len / mp->m_sb.sb_rextsize;
 }
 
+/* Convert an rt block count into an rt extent count. */
+static inline xfs_rtbxlen_t
+xfs_blen_to_rtbxlen(
+	struct xfs_mount	*mp,
+	uint64_t		blen)
+{
+	if (likely(mp->m_rtxblklog >= 0))
+		return blen >> mp->m_rtxblklog;
+
+	return div_u64(blen, mp->m_sb.sb_rextsize);
+}
+
 /* Convert an rt block number into an rt extent number. */
 static inline xfs_rtxnum_t
 xfs_rtb_to_rtx(
 	struct xfs_mount	*mp,
 	xfs_rtblock_t		rtbno)
 {
-	if (likely(mp->m_rtxblklog >= 0))
-		return rtbno >> mp->m_rtxblklog;
+	uint64_t		__rgbno = __xfs_rtb_to_rgbno(mp, rtbno);
 
-	return div_u64(rtbno, mp->m_sb.sb_rextsize);
+	if (likely(mp->m_rtxblklog >= 0))
+		return __rgbno >> mp->m_rtxblklog;
+	return div_u64(__rgbno, mp->m_sb.sb_rextsize);
 }
 
 /* Return the offset of an rt block number within an rt extent. */
@@ -86,26 +126,6 @@ xfs_rtb_to_rtxoff(
 	return do_div(rtbno, mp->m_sb.sb_rextsize);
 }
 
-/*
- * Convert an rt block number into an rt extent number, rounding up to the next
- * rt extent if the rt block is not aligned to an rt extent boundary.
- */
-static inline xfs_rtxnum_t
-xfs_rtb_to_rtxup(
-	struct xfs_mount	*mp,
-	xfs_rtblock_t		rtbno)
-{
-	if (likely(mp->m_rtxblklog >= 0)) {
-		if (rtbno & mp->m_rtxblkmask)
-			return (rtbno >> mp->m_rtxblklog) + 1;
-		return rtbno >> mp->m_rtxblklog;
-	}
-
-	if (do_div(rtbno, mp->m_sb.sb_rextsize))
-		rtbno++;
-	return rtbno;
-}
-
 /* Round this rtblock up to the nearest rt extent size. */
 static inline xfs_rtblock_t
 xfs_rtb_roundup_rtx(
@@ -268,7 +288,7 @@ struct xfs_rtalloc_rec {
 };
 
 typedef int (*xfs_rtalloc_query_range_fn)(
-	struct xfs_mount		*mp,
+	struct xfs_rtgroup		*rtg,
 	struct xfs_trans		*tp,
 	const struct xfs_rtalloc_rec	*rec,
 	void				*priv);
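The conversion helpers above now treat an rt extent number as
group-relative: xfs_rtx_to_rtb() adds the group's starting rt block, and
xfs_rtb_to_rtx() first reduces the block number to a group offset. A
standalone sketch of the forward direction (not kernel code; the geometry
values are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t sb_rextsize = 8;	/* rt blocks per rt extent (made up) */
	uint64_t rgblocks = 262144 * 8;	/* rt blocks per group (made up) */
	uint32_t rgno = 2;		/* which rt group */
	uint64_t rtx = 100;		/* group-relative rt extent number */

	/* like xfs_rgno_start_rtb(): first rt block of this group */
	uint64_t start = (uint64_t)rgno * rgblocks;

	/* like xfs_rtx_to_rtb(): group start plus the extent's block offset */
	uint64_t rtbno = start + rtx * sb_rextsize;

	printf("rt extent %llu of group %u starts at rt block %llu\n",
			(unsigned long long)rtx, rgno,
			(unsigned long long)rtbno);
	return 0;
}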
@@ -291,53 +311,42 @@ int xfs_rtmodify_summary(struct xfs_rtalloc_args *args, int log,
 		xfs_fileoff_t bbno, int delta);
 int xfs_rtfree_range(struct xfs_rtalloc_args *args, xfs_rtxnum_t start,
 		xfs_rtxlen_t len);
-int xfs_rtalloc_query_range(struct xfs_mount *mp, struct xfs_trans *tp,
+int xfs_rtalloc_query_range(struct xfs_rtgroup *rtg, struct xfs_trans *tp,
 		xfs_rtxnum_t start, xfs_rtxnum_t end,
 		xfs_rtalloc_query_range_fn fn, void *priv);
-int xfs_rtalloc_query_all(struct xfs_mount *mp, struct xfs_trans *tp,
-		xfs_rtalloc_query_range_fn fn,
-		void *priv);
-int xfs_rtalloc_extent_is_free(struct xfs_mount *mp, struct xfs_trans *tp,
-		xfs_rtxnum_t start, xfs_rtxlen_t len,
-		bool *is_free);
-/*
- * Free an extent in the realtime subvolume.  Length is expressed in
- * realtime extents, as is the block number.
- */
-int					/* error */
-xfs_rtfree_extent(
-	struct xfs_trans	*tp,	/* transaction pointer */
-	xfs_rtxnum_t		start,	/* starting rtext number to free */
-	xfs_rtxlen_t		len);	/* length of extent freed */
-
+int xfs_rtalloc_query_all(struct xfs_rtgroup *rtg, struct xfs_trans *tp,
+		xfs_rtalloc_query_range_fn fn, void *priv);
+int xfs_rtalloc_extent_is_free(struct xfs_rtgroup *rtg, struct xfs_trans *tp,
+		xfs_rtxnum_t start, xfs_rtxlen_t len, bool *is_free);
+int xfs_rtfree_extent(struct xfs_trans *tp, struct xfs_rtgroup *rtg,
+		xfs_rtxnum_t start, xfs_rtxlen_t len);
 /* Same as above, but in units of rt blocks. */
-int xfs_rtfree_blocks(struct xfs_trans *tp, xfs_fsblock_t rtbno,
-		xfs_filblks_t rtlen);
+int xfs_rtfree_blocks(struct xfs_trans *tp, struct xfs_rtgroup *rtg,
+		xfs_fsblock_t rtbno, xfs_filblks_t rtlen);
 
-xfs_filblks_t xfs_rtbitmap_blockcount(struct xfs_mount *mp, xfs_rtbxlen_t
-		rtextents);
+xfs_filblks_t xfs_rtbitmap_blockcount(struct xfs_mount *mp);
+xfs_filblks_t xfs_rtbitmap_blockcount_len(struct xfs_mount *mp,
+		xfs_rtbxlen_t rtextents);
 xfs_filblks_t xfs_rtsummary_blockcount(struct xfs_mount *mp,
-		unsigned int rsumlevels, xfs_extlen_t rbmblocks);
+		unsigned int *rsumlevels);
 
-int xfs_rtfile_initialize_blocks(struct xfs_inode *ip,
-		xfs_fileoff_t offset_fsb, xfs_fileoff_t end_fsb, void *data);
+int xfs_rtfile_initialize_blocks(struct xfs_rtgroup *rtg,
+		enum xfs_rtg_inodes type, xfs_fileoff_t offset_fsb,
+		xfs_fileoff_t end_fsb, void *data);
+int xfs_rtbitmap_create(struct xfs_rtgroup *rtg, struct xfs_inode *ip,
+		struct xfs_trans *tp, bool init);
+int xfs_rtsummary_create(struct xfs_rtgroup *rtg, struct xfs_inode *ip,
+		struct xfs_trans *tp, bool init);
 
-void xfs_rtbitmap_lock(struct xfs_mount *mp);
-void xfs_rtbitmap_unlock(struct xfs_mount *mp);
-void xfs_rtbitmap_trans_join(struct xfs_trans *tp);
-
-/* Lock the rt bitmap inode in shared mode */
-#define XFS_RBMLOCK_BITMAP	(1U << 0)
-/* Lock the rt summary inode in shared mode */
-#define XFS_RBMLOCK_SUMMARY	(1U << 1)
-
-void xfs_rtbitmap_lock_shared(struct xfs_mount *mp,
-		unsigned int rbmlock_flags);
-void xfs_rtbitmap_unlock_shared(struct xfs_mount *mp,
-		unsigned int rbmlock_flags);
 #else /* CONFIG_XFS_RT */
 # define xfs_rtfree_extent(t,b,l)			(-ENOSYS)
-# define xfs_rtfree_blocks(t,rb,rl)			(-ENOSYS)
+
+static inline int xfs_rtfree_blocks(struct xfs_trans *tp,
+		struct xfs_rtgroup *rtg, xfs_fsblock_t rtbno,
+		xfs_filblks_t rtlen)
+{
+	return -ENOSYS;
+}
 # define xfs_rtalloc_query_range(m,t,l,h,f,p)		(-ENOSYS)
 # define xfs_rtalloc_query_all(m,t,f,p)			(-ENOSYS)
 # define xfs_rtbitmap_read_buf(a,b)			(-ENOSYS)
@@ -345,17 +354,11 @@ void xfs_rtbitmap_unlock_shared(struct xfs_mount *mp,
 # define xfs_rtbuf_cache_relse(a)			(0)
 # define xfs_rtalloc_extent_is_free(m,t,s,l,i)		(-ENOSYS)
 static inline xfs_filblks_t
-xfs_rtbitmap_blockcount(struct xfs_mount *mp, xfs_rtbxlen_t rtextents)
+xfs_rtbitmap_blockcount_len(struct xfs_mount *mp, xfs_rtbxlen_t rtextents)
 {
 	/* shut up gcc */
 	return 0;
 }
-# define xfs_rtsummary_blockcount(mp, l, b)		(0)
-# define xfs_rtbitmap_lock(mp)				do { } while (0)
-# define xfs_rtbitmap_trans_join(tp)			do { } while (0)
-# define xfs_rtbitmap_unlock(mp)			do { } while (0)
-# define xfs_rtbitmap_lock_shared(mp, lf)		do { } while (0)
-# define xfs_rtbitmap_unlock_shared(mp, lf)		do { } while (0)
 #endif /* CONFIG_XFS_RT */
 
 #endif /* __XFS_RTBITMAP_H__ */
484	fs/xfs/libxfs/xfs_rtgroup.c	Normal file
@@ -0,0 +1,484 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* Copyright (c) 2022-2024 Oracle. All Rights Reserved.
|
||||
* Author: Darrick J. Wong <djwong@kernel.org>
|
||||
*/
|
||||
#include "xfs.h"
|
||||
#include "xfs_fs.h"
|
||||
#include "xfs_shared.h"
|
||||
#include "xfs_format.h"
|
||||
#include "xfs_trans_resv.h"
|
||||
#include "xfs_bit.h"
|
||||
#include "xfs_sb.h"
|
||||
#include "xfs_mount.h"
|
||||
#include "xfs_btree.h"
|
||||
#include "xfs_alloc_btree.h"
|
||||
#include "xfs_rmap_btree.h"
|
||||
#include "xfs_alloc.h"
|
||||
#include "xfs_ialloc.h"
|
||||
#include "xfs_rmap.h"
|
||||
#include "xfs_ag.h"
|
||||
#include "xfs_ag_resv.h"
|
||||
#include "xfs_health.h"
|
||||
#include "xfs_error.h"
|
||||
#include "xfs_bmap.h"
|
||||
#include "xfs_defer.h"
|
||||
#include "xfs_log_format.h"
|
||||
#include "xfs_trans.h"
|
||||
#include "xfs_trace.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_icache.h"
|
||||
#include "xfs_rtgroup.h"
|
||||
#include "xfs_rtbitmap.h"
|
||||
#include "xfs_metafile.h"
|
||||
#include "xfs_metadir.h"
|
||||
|
||||
int
|
||||
xfs_rtgroup_alloc(
|
||||
struct xfs_mount *mp,
|
||||
xfs_rgnumber_t rgno,
|
||||
xfs_rgnumber_t rgcount,
|
||||
xfs_rtbxlen_t rextents)
|
||||
{
|
||||
struct xfs_rtgroup *rtg;
|
||||
int error;
|
||||
|
||||
rtg = kzalloc(sizeof(struct xfs_rtgroup), GFP_KERNEL);
|
||||
if (!rtg)
|
||||
return -ENOMEM;
|
||||
|
||||
error = xfs_group_insert(mp, rtg_group(rtg), rgno, XG_TYPE_RTG);
|
||||
if (error)
|
||||
goto out_free_rtg;
|
||||
return 0;
|
||||
|
||||
out_free_rtg:
|
||||
kfree(rtg);
|
||||
return error;
|
||||
}
|
||||
|
||||
void
|
||||
xfs_rtgroup_free(
|
||||
struct xfs_mount *mp,
|
||||
xfs_rgnumber_t rgno)
|
||||
{
|
||||
xfs_group_free(mp, rgno, XG_TYPE_RTG, NULL);
|
||||
}
|
||||
|
||||
/* Free a range of incore rtgroup objects. */
|
||||
void
|
||||
xfs_free_rtgroups(
|
||||
struct xfs_mount *mp,
|
||||
xfs_rgnumber_t first_rgno,
|
||||
xfs_rgnumber_t end_rgno)
|
||||
{
|
||||
xfs_rgnumber_t rgno;
|
||||
|
||||
for (rgno = first_rgno; rgno < end_rgno; rgno++)
|
||||
xfs_rtgroup_free(mp, rgno);
|
||||
}
|
||||
|
||||
/* Initialize some range of incore rtgroup objects. */
|
||||
int
|
||||
xfs_initialize_rtgroups(
|
||||
struct xfs_mount *mp,
|
||||
xfs_rgnumber_t first_rgno,
|
||||
xfs_rgnumber_t end_rgno,
|
||||
xfs_rtbxlen_t rextents)
|
||||
{
|
||||
xfs_rgnumber_t index;
|
||||
int error;
|
||||
|
||||
if (first_rgno >= end_rgno)
|
||||
return 0;
|
||||
|
||||
for (index = first_rgno; index < end_rgno; index++) {
|
||||
error = xfs_rtgroup_alloc(mp, index, end_rgno, rextents);
|
||||
if (error)
|
||||
goto out_unwind_new_rtgs;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
out_unwind_new_rtgs:
|
||||
xfs_free_rtgroups(mp, first_rgno, index);
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Compute the number of rt extents in this realtime group. */
|
||||
xfs_rtxnum_t
|
||||
__xfs_rtgroup_extents(
|
||||
struct xfs_mount *mp,
|
||||
xfs_rgnumber_t rgno,
|
||||
xfs_rgnumber_t rgcount,
|
||||
xfs_rtbxlen_t rextents)
|
||||
{
|
||||
ASSERT(rgno < rgcount);
|
||||
if (rgno == rgcount - 1)
|
||||
return rextents - ((xfs_rtxnum_t)rgno * mp->m_sb.sb_rgextents);
|
||||
|
||||
ASSERT(xfs_has_rtgroups(mp));
|
||||
return mp->m_sb.sb_rgextents;
|
||||
}
|
||||
|
||||
xfs_rtxnum_t
|
||||
xfs_rtgroup_extents(
|
||||
struct xfs_mount *mp,
|
||||
xfs_rgnumber_t rgno)
|
||||
{
|
||||
return __xfs_rtgroup_extents(mp, rgno, mp->m_sb.sb_rgcount,
|
||||
mp->m_sb.sb_rextents);
|
||||
}
|
||||
|
||||
/*
|
||||
* Update the rt extent count of the previous tail rtgroup if it changed during
|
||||
* recovery (i.e. recovery of a growfs).
|
||||
*/
|
||||
int
|
||||
xfs_update_last_rtgroup_size(
|
||||
struct xfs_mount *mp,
|
||||
xfs_rgnumber_t prev_rgcount)
|
||||
{
|
||||
struct xfs_rtgroup *rtg;
|
||||
|
||||
ASSERT(prev_rgcount > 0);
|
||||
|
||||
rtg = xfs_rtgroup_grab(mp, prev_rgcount - 1);
|
||||
if (!rtg)
|
||||
return -EFSCORRUPTED;
|
||||
rtg->rtg_extents = __xfs_rtgroup_extents(mp, prev_rgcount - 1,
|
||||
mp->m_sb.sb_rgcount, mp->m_sb.sb_rextents);
|
||||
xfs_rtgroup_rele(rtg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Lock metadata inodes associated with this rt group. */
|
||||
void
|
||||
xfs_rtgroup_lock(
|
||||
struct xfs_rtgroup *rtg,
|
||||
unsigned int rtglock_flags)
|
||||
{
|
||||
ASSERT(!(rtglock_flags & ~XFS_RTGLOCK_ALL_FLAGS));
|
||||
ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) ||
|
||||
!(rtglock_flags & XFS_RTGLOCK_BITMAP));
|
||||
|
||||
if (rtglock_flags & XFS_RTGLOCK_BITMAP) {
|
||||
/*
|
||||
* Lock both realtime free space metadata inodes for a freespace
|
||||
* update.
|
||||
*/
|
||||
xfs_ilock(rtg->rtg_inodes[XFS_RTGI_BITMAP], XFS_ILOCK_EXCL);
|
||||
xfs_ilock(rtg->rtg_inodes[XFS_RTGI_SUMMARY], XFS_ILOCK_EXCL);
|
||||
} else if (rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) {
|
||||
xfs_ilock(rtg->rtg_inodes[XFS_RTGI_BITMAP], XFS_ILOCK_SHARED);
|
||||
}
|
||||
}
|
||||
|
||||
/* Unlock metadata inodes associated with this rt group. */
|
||||
void
|
||||
xfs_rtgroup_unlock(
|
||||
struct xfs_rtgroup *rtg,
|
||||
unsigned int rtglock_flags)
|
||||
{
|
||||
ASSERT(!(rtglock_flags & ~XFS_RTGLOCK_ALL_FLAGS));
|
||||
ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) ||
|
||||
!(rtglock_flags & XFS_RTGLOCK_BITMAP));
|
||||
|
||||
if (rtglock_flags & XFS_RTGLOCK_BITMAP) {
|
||||
xfs_iunlock(rtg->rtg_inodes[XFS_RTGI_SUMMARY], XFS_ILOCK_EXCL);
|
||||
xfs_iunlock(rtg->rtg_inodes[XFS_RTGI_BITMAP], XFS_ILOCK_EXCL);
|
||||
} else if (rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) {
|
||||
xfs_iunlock(rtg->rtg_inodes[XFS_RTGI_BITMAP], XFS_ILOCK_SHARED);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Join realtime group metadata inodes to the transaction. The ILOCKs will be
|
||||
* released on transaction commit.
|
||||
*/
|
||||
void
|
||||
xfs_rtgroup_trans_join(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_rtgroup *rtg,
|
||||
unsigned int rtglock_flags)
|
||||
{
|
||||
ASSERT(!(rtglock_flags & ~XFS_RTGLOCK_ALL_FLAGS));
|
||||
ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED));
|
||||
|
||||
if (rtglock_flags & XFS_RTGLOCK_BITMAP) {
|
||||
xfs_trans_ijoin(tp, rtg->rtg_inodes[XFS_RTGI_BITMAP],
|
||||
XFS_ILOCK_EXCL);
|
||||
xfs_trans_ijoin(tp, rtg->rtg_inodes[XFS_RTGI_SUMMARY],
|
||||
XFS_ILOCK_EXCL);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PROVE_LOCKING
|
||||
static struct lock_class_key xfs_rtginode_lock_class;
|
||||
|
||||
static int
|
||||
xfs_rtginode_ilock_cmp_fn(
|
||||
const struct lockdep_map *m1,
|
||||
const struct lockdep_map *m2)
|
||||
{
|
||||
const struct xfs_inode *ip1 =
|
||||
container_of(m1, struct xfs_inode, i_lock.dep_map);
|
||||
const struct xfs_inode *ip2 =
|
||||
container_of(m2, struct xfs_inode, i_lock.dep_map);
|
||||
|
||||
if (ip1->i_projid < ip2->i_projid)
|
||||
return -1;
|
||||
if (ip1->i_projid > ip2->i_projid)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void
|
||||
xfs_rtginode_ilock_print_fn(
|
||||
const struct lockdep_map *m)
|
||||
{
|
||||
const struct xfs_inode *ip =
|
||||
container_of(m, struct xfs_inode, i_lock.dep_map);
|
||||
|
||||
printk(KERN_CONT " rgno=%u", ip->i_projid);
|
||||
}
|
||||
|
||||
/*
|
||||
* Most of the time each of the RTG inode locks are only taken one at a time.
|
||||
* But when committing deferred ops, more than one of a kind can be taken.
|
||||
* However, deferred rt ops will be committed in rgno order so there is no
|
||||
* potential for deadlocks. The code here is needed to tell lockdep about this
|
||||
* order.
|
||||
*/
|
||||
static inline void
|
||||
xfs_rtginode_lockdep_setup(
|
||||
struct xfs_inode *ip,
|
||||
xfs_rgnumber_t rgno,
|
||||
enum xfs_rtg_inodes type)
|
||||
{
|
||||
lockdep_set_class_and_subclass(&ip->i_lock, &xfs_rtginode_lock_class,
|
||||
type);
|
||||
lock_set_cmp_fn(&ip->i_lock, xfs_rtginode_ilock_cmp_fn,
|
||||
xfs_rtginode_ilock_print_fn);
|
||||
}
|
||||
#else
|
||||
#define xfs_rtginode_lockdep_setup(ip, rgno, type) do { } while (0)
|
||||
#endif /* CONFIG_PROVE_LOCKING */
|
||||
|
||||
struct xfs_rtginode_ops {
|
||||
const char *name; /* short name */
|
||||
|
||||
enum xfs_metafile_type metafile_type;
|
||||
|
||||
/* Does the fs have this feature? */
|
||||
bool (*enabled)(struct xfs_mount *mp);
|
||||
|
||||
/* Create this rtgroup metadata inode and initialize it. */
|
||||
int (*create)(struct xfs_rtgroup *rtg,
|
||||
struct xfs_inode *ip,
|
||||
struct xfs_trans *tp,
|
||||
bool init);
|
||||
};
|
||||
|
||||
static const struct xfs_rtginode_ops xfs_rtginode_ops[XFS_RTGI_MAX] = {
|
||||
[XFS_RTGI_BITMAP] = {
|
||||
.name = "bitmap",
|
||||
.metafile_type = XFS_METAFILE_RTBITMAP,
|
||||
.create = xfs_rtbitmap_create,
|
||||
},
|
||||
[XFS_RTGI_SUMMARY] = {
|
||||
.name = "summary",
|
||||
.metafile_type = XFS_METAFILE_RTSUMMARY,
|
||||
.create = xfs_rtsummary_create,
|
||||
},
|
||||
};
|
||||
|
||||
/* Return the shortname of this rtgroup inode. */
|
||||
const char *
|
||||
xfs_rtginode_name(
|
||||
enum xfs_rtg_inodes type)
|
||||
{
|
||||
return xfs_rtginode_ops[type].name;
|
||||
}
|
||||
|
||||
/* Return the metafile type of this rtgroup inode. */
|
||||
enum xfs_metafile_type
|
||||
xfs_rtginode_metafile_type(
|
||||
enum xfs_rtg_inodes type)
|
||||
{
|
||||
return xfs_rtginode_ops[type].metafile_type;
|
||||
}
|
||||
|
||||
/* Should this rtgroup inode be present? */
|
||||
bool
|
||||
xfs_rtginode_enabled(
|
||||
struct xfs_rtgroup *rtg,
|
||||
enum xfs_rtg_inodes type)
|
||||
{
|
||||
const struct xfs_rtginode_ops *ops = &xfs_rtginode_ops[type];
|
||||
|
||||
if (!ops->enabled)
|
||||
return true;
|
||||
return ops->enabled(rtg_mount(rtg));
|
||||
}
|
||||
|
||||
/* Load and existing rtgroup inode into the rtgroup structure. */
|
||||
int
|
||||
xfs_rtginode_load(
|
||||
struct xfs_rtgroup *rtg,
|
||||
enum xfs_rtg_inodes type,
|
||||
struct xfs_trans *tp)
|
||||
{
|
||||
struct xfs_mount *mp = tp->t_mountp;
|
||||
struct xfs_inode *ip;
|
||||
const struct xfs_rtginode_ops *ops = &xfs_rtginode_ops[type];
|
||||
int error;
|
||||
|
||||
if (!xfs_rtginode_enabled(rtg, type))
|
||||
return 0;
|
||||
|
||||
if (!xfs_has_rtgroups(mp)) {
|
||||
xfs_ino_t ino;
|
||||
|
||||
switch (type) {
|
||||
case XFS_RTGI_BITMAP:
|
||||
ino = mp->m_sb.sb_rbmino;
|
||||
break;
|
||||
case XFS_RTGI_SUMMARY:
|
||||
ino = mp->m_sb.sb_rsumino;
|
||||
break;
|
||||
default:
|
||||
/* None of the other types exist on !rtgroups */
|
||||
return 0;
|
||||
}
|
||||
|
||||
error = xfs_trans_metafile_iget(tp, ino, ops->metafile_type,
|
||||
&ip);
|
||||
} else {
|
||||
const char *path;
|
||||
|
||||
if (!mp->m_rtdirip)
|
||||
return -EFSCORRUPTED;
|
||||
|
||||
path = xfs_rtginode_path(rtg_rgno(rtg), type);
|
||||
if (!path)
|
||||
return -ENOMEM;
|
||||
error = xfs_metadir_load(tp, mp->m_rtdirip, path,
|
||||
ops->metafile_type, &ip);
|
||||
kfree(path);
|
||||
}
|
||||
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (XFS_IS_CORRUPT(mp, ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
|
||||
ip->i_df.if_format != XFS_DINODE_FMT_BTREE)) {
|
||||
xfs_irele(ip);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
if (XFS_IS_CORRUPT(mp, ip->i_projid != rtg_rgno(rtg))) {
|
||||
xfs_irele(ip);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
xfs_rtginode_lockdep_setup(ip, rtg_rgno(rtg), type);
|
||||
rtg->rtg_inodes[type] = ip;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Release an rtgroup metadata inode. */
|
||||
void
|
||||
xfs_rtginode_irele(
|
||||
struct xfs_inode **ipp)
|
||||
{
|
||||
if (*ipp)
|
||||
xfs_irele(*ipp);
|
||||
*ipp = NULL;
|
||||
}
|
||||
|
||||
/* Add a metadata inode for a realtime rmap btree. */
|
||||
int
|
||||
xfs_rtginode_create(
|
||||
struct xfs_rtgroup *rtg,
|
||||
enum xfs_rtg_inodes type,
|
||||
bool init)
|
||||
{
|
||||
const struct xfs_rtginode_ops *ops = &xfs_rtginode_ops[type];
|
||||
struct xfs_mount *mp = rtg_mount(rtg);
|
||||
struct xfs_metadir_update upd = {
|
||||
.dp = mp->m_rtdirip,
|
||||
.metafile_type = ops->metafile_type,
|
||||
};
|
||||
int error;
|
||||
|
||||
if (!xfs_rtginode_enabled(rtg, type))
|
||||
return 0;
|
||||
|
||||
if (!mp->m_rtdirip)
|
||||
return -EFSCORRUPTED;
|
||||
|
||||
upd.path = xfs_rtginode_path(rtg_rgno(rtg), type);
|
||||
if (!upd.path)
|
||||
return -ENOMEM;
|
||||
|
||||
error = xfs_metadir_start_create(&upd);
|
||||
if (error)
|
||||
goto out_path;
|
||||
|
||||
error = xfs_metadir_create(&upd, S_IFREG);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
xfs_rtginode_lockdep_setup(upd.ip, rtg_rgno(rtg), type);
|
||||
|
||||
upd.ip->i_projid = rtg_rgno(rtg);
|
||||
error = ops->create(rtg, upd.ip, upd.tp, init);
|
||||
if (error)
|
||||
goto out_cancel;
|
||||
|
||||
error = xfs_metadir_commit(&upd);
|
||||
if (error)
|
||||
goto out_path;
|
||||
|
||||
kfree(upd.path);
|
||||
xfs_finish_inode_setup(upd.ip);
|
||||
rtg->rtg_inodes[type] = upd.ip;
|
||||
return 0;
|
||||
|
||||
out_cancel:
|
||||
xfs_metadir_cancel(&upd, error);
|
||||
/* Have to finish setting up the inode to ensure it's deleted. */
|
||||
if (upd.ip) {
|
||||
xfs_finish_inode_setup(upd.ip);
|
||||
xfs_irele(upd.ip);
|
||||
}
|
||||
out_path:
|
||||
kfree(upd.path);
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Create the parent directory for all rtgroup inodes and load it. */
|
||||
int
|
||||
xfs_rtginode_mkdir_parent(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
if (!mp->m_metadirip)
|
||||
return -EFSCORRUPTED;
|
||||
|
||||
return xfs_metadir_mkdir(mp->m_metadirip, "rtgroups", &mp->m_rtdirip);
|
||||
}
|
||||
|
||||
/* Load the parent directory of all rtgroup inodes. */
|
||||
int
|
||||
xfs_rtginode_load_parent(
|
||||
struct xfs_trans *tp)
|
||||
{
|
||||
struct xfs_mount *mp = tp->t_mountp;
|
||||
|
||||
if (!mp->m_metadirip)
|
||||
return -EFSCORRUPTED;
|
||||
|
||||
return xfs_metadir_load(tp, mp->m_metadirip, "rtgroups",
|
||||
XFS_METAFILE_DIR, &mp->m_rtdirip);
|
||||
}
|
274	fs/xfs/libxfs/xfs_rtgroup.h	Normal file
@@ -0,0 +1,274 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
||||
/*
|
||||
* Copyright (c) 2022-2024 Oracle. All Rights Reserved.
|
||||
* Author: Darrick J. Wong <djwong@kernel.org>
|
||||
*/
|
||||
#ifndef __LIBXFS_RTGROUP_H
|
||||
#define __LIBXFS_RTGROUP_H 1
|
||||
|
||||
#include "xfs_group.h"
|
||||
|
||||
struct xfs_mount;
|
||||
struct xfs_trans;
|
||||
|
||||
enum xfs_rtg_inodes {
|
||||
XFS_RTGI_BITMAP, /* allocation bitmap */
|
||||
XFS_RTGI_SUMMARY, /* allocation summary */
|
||||
|
||||
XFS_RTGI_MAX,
|
||||
};
|
||||
|
||||
#ifdef MAX_LOCKDEP_SUBCLASSES
|
||||
static_assert(XFS_RTGI_MAX <= MAX_LOCKDEP_SUBCLASSES);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Realtime group incore structure, similar to the per-AG structure.
|
||||
*/
|
||||
struct xfs_rtgroup {
|
||||
struct xfs_group rtg_group;
|
||||
|
||||
/* per-rtgroup metadata inodes */
|
||||
struct xfs_inode *rtg_inodes[XFS_RTGI_MAX];
|
||||
|
||||
/* Number of blocks in this group */
|
||||
xfs_rtxnum_t rtg_extents;
|
||||
|
||||
/*
|
||||
* Cache of rt summary level per bitmap block with the invariant that
|
||||
* rtg_rsum_cache[bbno] > the maximum i for which rsum[i][bbno] != 0,
|
||||
* or 0 if rsum[i][bbno] == 0 for all i.
|
||||
*
|
||||
* Reads and writes are serialized by the rsumip inode lock.
|
||||
*/
|
||||
uint8_t *rtg_rsum_cache;
|
||||
};
|
||||
|
||||
static inline struct xfs_rtgroup *to_rtg(struct xfs_group *xg)
|
||||
{
|
||||
return container_of(xg, struct xfs_rtgroup, rtg_group);
|
||||
}
|
||||
|
||||
static inline struct xfs_group *rtg_group(struct xfs_rtgroup *rtg)
|
||||
{
|
||||
return &rtg->rtg_group;
|
||||
}
|
||||
|
||||
static inline struct xfs_mount *rtg_mount(const struct xfs_rtgroup *rtg)
|
||||
{
|
||||
return rtg->rtg_group.xg_mount;
|
||||
}
|
||||
|
||||
static inline xfs_rgnumber_t rtg_rgno(const struct xfs_rtgroup *rtg)
|
||||
{
|
||||
return rtg->rtg_group.xg_gno;
|
||||
}
|
||||
|
||||
/* Passive rtgroup references */
|
||||
static inline struct xfs_rtgroup *
|
||||
xfs_rtgroup_get(
|
||||
struct xfs_mount *mp,
|
||||
xfs_rgnumber_t rgno)
|
||||
{
|
||||
return to_rtg(xfs_group_get(mp, rgno, XG_TYPE_RTG));
|
||||
}
|
||||
|
||||
static inline struct xfs_rtgroup *
|
||||
xfs_rtgroup_hold(
|
||||
struct xfs_rtgroup *rtg)
|
||||
{
|
||||
return to_rtg(xfs_group_hold(rtg_group(rtg)));
|
||||
}
|
||||
|
||||
static inline void
|
||||
xfs_rtgroup_put(
|
||||
struct xfs_rtgroup *rtg)
|
||||
{
|
||||
xfs_group_put(rtg_group(rtg));
|
||||
}
|
||||
|
||||
/* Active rtgroup references */
|
||||
static inline struct xfs_rtgroup *
|
||||
xfs_rtgroup_grab(
|
||||
struct xfs_mount *mp,
|
||||
xfs_rgnumber_t rgno)
|
||||
{
|
||||
return to_rtg(xfs_group_grab(mp, rgno, XG_TYPE_RTG));
|
||||
}
|
||||
|
||||
static inline void
|
||||
xfs_rtgroup_rele(
|
||||
struct xfs_rtgroup *rtg)
|
||||
{
|
||||
xfs_group_rele(rtg_group(rtg));
|
||||
}
|
||||
|
||||
static inline struct xfs_rtgroup *
|
||||
xfs_rtgroup_next_range(
|
||||
struct xfs_mount *mp,
|
||||
struct xfs_rtgroup *rtg,
|
||||
xfs_rgnumber_t start_rgno,
|
||||
xfs_rgnumber_t end_rgno)
|
||||
{
|
||||
return to_rtg(xfs_group_next_range(mp, rtg ? rtg_group(rtg) : NULL,
|
||||
start_rgno, end_rgno, XG_TYPE_RTG));
|
||||
}
|
||||
|
||||
static inline struct xfs_rtgroup *
|
||||
xfs_rtgroup_next(
|
||||
struct xfs_mount *mp,
|
||||
struct xfs_rtgroup *rtg)
|
||||
{
|
||||
return xfs_rtgroup_next_range(mp, rtg, 0, mp->m_sb.sb_rgcount - 1);
|
||||
}
|
||||
|
||||
static inline xfs_rtblock_t
|
||||
xfs_rgno_start_rtb(
|
||||
struct xfs_mount *mp,
|
||||
xfs_rgnumber_t rgno)
|
||||
{
|
||||
if (mp->m_rgblklog >= 0)
|
||||
return ((xfs_rtblock_t)rgno << mp->m_rgblklog);
|
||||
return ((xfs_rtblock_t)rgno * mp->m_rgblocks);
|
||||
}
|
||||
|
||||
static inline xfs_rtblock_t
|
||||
__xfs_rgbno_to_rtb(
|
||||
struct xfs_mount *mp,
|
||||
xfs_rgnumber_t rgno,
|
||||
xfs_rgblock_t rgbno)
|
||||
{
|
||||
return xfs_rgno_start_rtb(mp, rgno) + rgbno;
|
||||
}
|
||||
|
||||
static inline xfs_rtblock_t
|
||||
xfs_rgbno_to_rtb(
|
||||
struct xfs_rtgroup *rtg,
|
||||
xfs_rgblock_t rgbno)
|
||||
{
|
||||
return __xfs_rgbno_to_rtb(rtg_mount(rtg), rtg_rgno(rtg), rgbno);
|
||||
}
|
||||
|
||||
static inline xfs_rgnumber_t
|
||||
xfs_rtb_to_rgno(
|
||||
struct xfs_mount *mp,
|
||||
xfs_rtblock_t rtbno)
|
||||
{
|
||||
if (!xfs_has_rtgroups(mp))
|
||||
return 0;
|
||||
|
||||
if (mp->m_rgblklog >= 0)
|
||||
return rtbno >> mp->m_rgblklog;
|
||||
|
||||
return div_u64(rtbno, mp->m_rgblocks);
|
||||
}
|
||||
|
||||
static inline uint64_t
|
||||
__xfs_rtb_to_rgbno(
|
||||
struct xfs_mount *mp,
|
||||
xfs_rtblock_t rtbno)
|
||||
{
|
||||
uint32_t rem;
|
||||
|
||||
if (!xfs_has_rtgroups(mp))
|
||||
return rtbno;
|
||||
|
||||
if (mp->m_rgblklog >= 0)
|
||||
return rtbno & mp->m_rgblkmask;
|
||||
|
||||
div_u64_rem(rtbno, mp->m_rgblocks, &rem);
|
||||
return rem;
|
||||
}
|
||||
|
||||
static inline xfs_rgblock_t
|
||||
xfs_rtb_to_rgbno(
|
||||
struct xfs_mount *mp,
|
||||
xfs_rtblock_t rtbno)
|
||||
{
|
||||
return __xfs_rtb_to_rgbno(mp, rtbno);
|
||||
}
|
||||
|
||||
static inline xfs_daddr_t
|
||||
xfs_rtb_to_daddr(
|
||||
struct xfs_mount *mp,
|
||||
xfs_rtblock_t rtbno)
|
||||
{
|
||||
return rtbno << mp->m_blkbb_log;
|
||||
}
|
||||
|
||||
static inline xfs_rtblock_t
|
||||
xfs_daddr_to_rtb(
|
||||
struct xfs_mount *mp,
|
||||
xfs_daddr_t daddr)
|
||||
{
|
||||
return daddr >> mp->m_blkbb_log;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_XFS_RT
|
||||
int xfs_rtgroup_alloc(struct xfs_mount *mp, xfs_rgnumber_t rgno,
|
||||
xfs_rgnumber_t rgcount, xfs_rtbxlen_t rextents);
|
||||
void xfs_rtgroup_free(struct xfs_mount *mp, xfs_rgnumber_t rgno);
|
||||
|
||||
void xfs_free_rtgroups(struct xfs_mount *mp, xfs_rgnumber_t first_rgno,
|
||||
xfs_rgnumber_t end_rgno);
|
||||
int xfs_initialize_rtgroups(struct xfs_mount *mp, xfs_rgnumber_t first_rgno,
|
||||
xfs_rgnumber_t end_rgno, xfs_rtbxlen_t rextents);
|
||||
|
||||
xfs_rtxnum_t __xfs_rtgroup_extents(struct xfs_mount *mp, xfs_rgnumber_t rgno,
|
||||
xfs_rgnumber_t rgcount, xfs_rtbxlen_t rextents);
|
||||
xfs_rtxnum_t xfs_rtgroup_extents(struct xfs_mount *mp, xfs_rgnumber_t rgno);
|
||||
|
||||
int xfs_update_last_rtgroup_size(struct xfs_mount *mp,
|
||||
xfs_rgnumber_t prev_rgcount);
|
||||
|
||||
/* Lock the rt bitmap inode in exclusive mode */
|
||||
#define XFS_RTGLOCK_BITMAP (1U << 0)
|
||||
/* Lock the rt bitmap inode in shared mode */
|
||||
#define XFS_RTGLOCK_BITMAP_SHARED (1U << 1)
|
||||
|
||||
#define XFS_RTGLOCK_ALL_FLAGS (XFS_RTGLOCK_BITMAP | \
|
||||
XFS_RTGLOCK_BITMAP_SHARED)
|
||||
|
||||
void xfs_rtgroup_lock(struct xfs_rtgroup *rtg, unsigned int rtglock_flags);
|
||||
void xfs_rtgroup_unlock(struct xfs_rtgroup *rtg, unsigned int rtglock_flags);
|
||||
void xfs_rtgroup_trans_join(struct xfs_trans *tp, struct xfs_rtgroup *rtg,
|
||||
unsigned int rtglock_flags);
|
||||
|
||||
int xfs_rtginode_mkdir_parent(struct xfs_mount *mp);
|
||||
int xfs_rtginode_load_parent(struct xfs_trans *tp);
|
||||
|
||||
const char *xfs_rtginode_name(enum xfs_rtg_inodes type);
|
||||
enum xfs_metafile_type xfs_rtginode_metafile_type(enum xfs_rtg_inodes type);
|
||||
bool xfs_rtginode_enabled(struct xfs_rtgroup *rtg, enum xfs_rtg_inodes type);
|
||||
int xfs_rtginode_load(struct xfs_rtgroup *rtg, enum xfs_rtg_inodes type,
|
||||
struct xfs_trans *tp);
|
||||
int xfs_rtginode_create(struct xfs_rtgroup *rtg, enum xfs_rtg_inodes type,
|
||||
bool init);
|
||||
void xfs_rtginode_irele(struct xfs_inode **ipp);
|
||||
|
||||
static inline const char *xfs_rtginode_path(xfs_rgnumber_t rgno,
|
||||
enum xfs_rtg_inodes type)
|
||||
{
|
||||
return kasprintf(GFP_KERNEL, "%u.%s", rgno, xfs_rtginode_name(type));
|
||||
}
|
||||
#else
|
||||
static inline void xfs_free_rtgroups(struct xfs_mount *mp,
|
||||
xfs_rgnumber_t first_rgno, xfs_rgnumber_t end_rgno)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int xfs_initialize_rtgroups(struct xfs_mount *mp,
|
||||
xfs_rgnumber_t first_rgno, xfs_rgnumber_t end_rgno,
|
||||
xfs_rtbxlen_t rextents)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
# define xfs_rtgroup_extents(mp, rgno) (0)
|
||||
# define xfs_update_last_rtgroup_size(mp, rgno) (-EOPNOTSUPP)
|
||||
# define xfs_rtgroup_lock(rtg, gf) ((void)0)
|
||||
# define xfs_rtgroup_unlock(rtg, gf) ((void)0)
|
||||
# define xfs_rtgroup_trans_join(tp, rtg, gf) ((void)0)
|
||||
#endif /* CONFIG_XFS_RT */
|
||||
|
||||
#endif /* __LIBXFS_RTGROUP_H */
|
@ -710,6 +710,9 @@ __xfs_sb_from_disk(
|
||||
to->sb_metadirino = be64_to_cpu(from->sb_metadirino);
|
||||
else
|
||||
to->sb_metadirino = NULLFSINO;
|
||||
|
||||
to->sb_rgcount = 1;
|
||||
to->sb_rgextents = 0;
|
||||
}
|
||||
|
||||
void
|
||||
@ -994,8 +997,18 @@ xfs_mount_sb_set_rextsize(
|
||||
struct xfs_mount *mp,
|
||||
struct xfs_sb *sbp)
|
||||
{
|
||||
struct xfs_groups *rgs = &mp->m_groups[XG_TYPE_RTG];
|
||||
|
||||
mp->m_rtxblklog = log2_if_power2(sbp->sb_rextsize);
|
||||
mp->m_rtxblkmask = mask64_if_power2(sbp->sb_rextsize);
|
||||
|
||||
mp->m_rgblocks = 0;
|
||||
mp->m_rgblklog = 0;
|
||||
mp->m_rgblkmask = (uint64_t)-1;
|
||||
|
||||
rgs->blocks = 0;
|
||||
rgs->blklog = 0;
|
||||
rgs->blkmask = (uint64_t)-1;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -224,7 +224,7 @@ xfs_rtalloc_block_count(
|
||||
xfs_rtxlen_t rtxlen;
|
||||
|
||||
rtxlen = xfs_extlen_to_rtxlen(mp, XFS_MAX_BMBT_EXTLEN);
|
||||
rtbmp_blocks = xfs_rtbitmap_blockcount(mp, rtxlen);
|
||||
rtbmp_blocks = xfs_rtbitmap_blockcount_len(mp, rtxlen);
|
||||
return (rtbmp_blocks + 1) * num_ops;
|
||||
}
|
||||
|
||||
|
@ -9,10 +9,12 @@
|
||||
typedef uint32_t prid_t; /* project ID */
|
||||
|
||||
typedef uint32_t xfs_agblock_t; /* blockno in alloc. group */
|
||||
typedef uint32_t xfs_rgblock_t; /* blockno in realtime group */
|
||||
typedef uint32_t xfs_agino_t; /* inode # within allocation grp */
|
||||
typedef uint32_t xfs_extlen_t; /* extent length in blocks */
|
||||
typedef uint32_t xfs_rtxlen_t; /* file extent length in rtextents */
|
||||
typedef uint32_t xfs_agnumber_t; /* allocation group number */
|
||||
typedef uint32_t xfs_rgnumber_t; /* realtime group number */
|
||||
typedef uint64_t xfs_extnum_t; /* # of extents in a file */
|
||||
typedef uint32_t xfs_aextnum_t; /* # extents in an attribute fork */
|
||||
typedef int64_t xfs_fsize_t; /* bytes in a file */
|
||||
@ -53,7 +55,9 @@ typedef void * xfs_failaddr_t;
|
||||
#define NULLFILEOFF ((xfs_fileoff_t)-1)
|
||||
|
||||
#define NULLAGBLOCK ((xfs_agblock_t)-1)
|
||||
#define NULLRGBLOCK ((xfs_rgblock_t)-1)
|
||||
#define NULLAGNUMBER ((xfs_agnumber_t)-1)
|
||||
#define NULLRGNUMBER ((xfs_rgnumber_t)-1)
|
||||
|
||||
#define NULLCOMMITLSN ((xfs_lsn_t)-1)
|
||||
|
||||
@ -214,11 +218,13 @@ enum xbtree_recpacking {
|
||||
|
||||
enum xfs_group_type {
|
||||
XG_TYPE_AG,
|
||||
XG_TYPE_RTG,
|
||||
XG_TYPE_MAX,
|
||||
} __packed;
|
||||
|
||||
#define XG_TYPE_STRINGS \
|
||||
{ XG_TYPE_AG, "ag" }
|
||||
{ XG_TYPE_AG, "ag" }, \
|
||||
{ XG_TYPE_RTG, "rtg" }
|
||||
|
||||
/*
|
||||
* Type verifier functions
|
||||
|
@ -19,6 +19,7 @@
|
||||
#include "xfs_bmap_btree.h"
|
||||
#include "xfs_rmap.h"
|
||||
#include "xfs_rmap_btree.h"
|
||||
#include "xfs_rtgroup.h"
|
||||
#include "xfs_health.h"
|
||||
#include "scrub/scrub.h"
|
||||
#include "scrub/common.h"
|
||||
@ -314,8 +315,20 @@ xchk_bmap_rt_iextent_xref(
|
||||
struct xchk_bmap_info *info,
|
||||
struct xfs_bmbt_irec *irec)
|
||||
{
|
||||
int error;
|
||||
|
||||
error = xchk_rtgroup_init_existing(info->sc,
|
||||
xfs_rtb_to_rgno(ip->i_mount, irec->br_startblock),
|
||||
&info->sc->sr);
|
||||
if (!xchk_fblock_process_error(info->sc, info->whichfork,
|
||||
irec->br_startoff, &error))
|
||||
return;
|
||||
|
||||
xchk_rtgroup_lock(&info->sc->sr, XCHK_RTGLOCK_ALL);
|
||||
xchk_xref_is_used_rt_space(info->sc, irec->br_startblock,
|
||||
irec->br_blockcount);
|
||||
|
||||
xchk_rtgroup_free(info->sc, &info->sc->sr);
|
||||
}
|
||||
|
||||
/* Cross-reference a single datadev extent record. */
|
||||
|
@ -34,6 +34,7 @@
|
||||
#include "xfs_quota.h"
|
||||
#include "xfs_exchmaps.h"
|
||||
#include "xfs_rtbitmap.h"
|
||||
#include "xfs_rtgroup.h"
|
||||
#include "scrub/scrub.h"
|
||||
#include "scrub/common.h"
|
||||
#include "scrub/trace.h"
|
||||
@ -121,6 +122,17 @@ xchk_process_error(
|
||||
XFS_SCRUB_OFLAG_CORRUPT, __return_address);
|
||||
}
|
||||
|
||||
bool
|
||||
xchk_process_rt_error(
|
||||
struct xfs_scrub *sc,
|
||||
xfs_rgnumber_t rgno,
|
||||
xfs_rgblock_t rgbno,
|
||||
int *error)
|
||||
{
|
||||
return __xchk_process_error(sc, rgno, rgbno, error,
|
||||
XFS_SCRUB_OFLAG_CORRUPT, __return_address);
|
||||
}
|
||||
|
||||
bool
|
||||
xchk_xref_process_error(
|
||||
struct xfs_scrub *sc,
|
||||
@ -684,6 +696,72 @@ xchk_ag_init(
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_XFS_RT
|
||||
/*
|
||||
* For scrubbing a realtime group, grab all the in-core resources we'll need to
|
||||
* check the metadata, which means taking the ILOCK of the realtime group's
|
||||
* metadata inodes. Callers must not join these inodes to the transaction with
|
||||
* non-zero lockflags or concurrency problems will result. The @rtglock_flags
|
||||
* argument takes XFS_RTGLOCK_* flags.
|
||||
*/
|
||||
int
|
||||
xchk_rtgroup_init(
|
||||
struct xfs_scrub *sc,
|
||||
xfs_rgnumber_t rgno,
|
||||
struct xchk_rt *sr)
|
||||
{
|
||||
ASSERT(sr->rtg == NULL);
|
||||
ASSERT(sr->rtlock_flags == 0);
|
||||
|
||||
sr->rtg = xfs_rtgroup_get(sc->mp, rgno);
|
||||
if (!sr->rtg)
|
||||
return -ENOENT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
xchk_rtgroup_lock(
|
||||
struct xchk_rt *sr,
|
||||
unsigned int rtglock_flags)
|
||||
{
|
||||
xfs_rtgroup_lock(sr->rtg, rtglock_flags);
|
||||
sr->rtlock_flags = rtglock_flags;
|
||||
}
|
||||
|
||||
/*
|
||||
* Unlock the realtime group. This must be done /after/ committing (or
|
||||
* cancelling) the scrub transaction.
|
||||
*/
|
||||
static void
|
||||
xchk_rtgroup_unlock(
|
||||
struct xchk_rt *sr)
|
||||
{
|
||||
ASSERT(sr->rtg != NULL);
|
||||
|
||||
if (sr->rtlock_flags) {
|
||||
xfs_rtgroup_unlock(sr->rtg, sr->rtlock_flags);
|
||||
sr->rtlock_flags = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Unlock the realtime group and release its resources. This must be done
|
||||
* /after/ committing (or cancelling) the scrub transaction.
|
||||
*/
|
||||
void
|
||||
xchk_rtgroup_free(
|
||||
struct xfs_scrub *sc,
|
||||
struct xchk_rt *sr)
|
||||
{
|
||||
ASSERT(sr->rtg != NULL);
|
||||
|
||||
xchk_rtgroup_unlock(sr);
|
||||
|
||||
xfs_rtgroup_put(sr->rtg);
|
||||
sr->rtg = NULL;
|
||||
}
|
||||
#endif /* CONFIG_XFS_RT */
|
||||
|
||||
/* Per-scrubber setup functions */
|
||||
|
||||
void
|
||||
|
@ -12,6 +12,8 @@ void xchk_trans_cancel(struct xfs_scrub *sc);
|
||||
|
||||
bool xchk_process_error(struct xfs_scrub *sc, xfs_agnumber_t agno,
|
||||
xfs_agblock_t bno, int *error);
|
||||
bool xchk_process_rt_error(struct xfs_scrub *sc, xfs_rgnumber_t rgno,
|
||||
xfs_rgblock_t rgbno, int *error);
|
||||
bool xchk_fblock_process_error(struct xfs_scrub *sc, int whichfork,
|
||||
xfs_fileoff_t offset, int *error);
|
||||
|
||||
@ -118,6 +120,34 @@ xchk_ag_init_existing(
|
||||
return error == -ENOENT ? -EFSCORRUPTED : error;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_XFS_RT
|
||||
|
||||
/* All the locks we need to check an rtgroup. */
|
||||
#define XCHK_RTGLOCK_ALL (XFS_RTGLOCK_BITMAP)
|
||||
|
||||
int xchk_rtgroup_init(struct xfs_scrub *sc, xfs_rgnumber_t rgno,
|
||||
struct xchk_rt *sr);
|
||||
|
||||
static inline int
|
||||
xchk_rtgroup_init_existing(
|
||||
struct xfs_scrub *sc,
|
||||
xfs_rgnumber_t rgno,
|
||||
struct xchk_rt *sr)
|
||||
{
|
||||
int error = xchk_rtgroup_init(sc, rgno, sr);
|
||||
|
||||
return error == -ENOENT ? -EFSCORRUPTED : error;
|
||||
}
|
||||
|
||||
void xchk_rtgroup_lock(struct xchk_rt *sr, unsigned int rtglock_flags);
|
||||
void xchk_rtgroup_free(struct xfs_scrub *sc, struct xchk_rt *sr);
|
||||
#else
|
||||
# define xchk_rtgroup_init(sc, rgno, sr) (-EFSCORRUPTED)
|
||||
# define xchk_rtgroup_init_existing(sc, rgno, sr) (-EFSCORRUPTED)
|
||||
# define xchk_rtgroup_lock(sc, lockflags) do { } while (0)
|
||||
# define xchk_rtgroup_free(sc, sr) do { } while (0)
|
||||
#endif /* CONFIG_XFS_RT */
|
||||
|
||||
int xchk_ag_read_headers(struct xfs_scrub *sc, xfs_agnumber_t agno,
|
||||
struct xchk_ag *sa);
|
||||
void xchk_ag_btcur_free(struct xchk_ag *sa);
|
||||
|
@ -19,6 +19,7 @@
#include "xfs_rtbitmap.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_rtgroup.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
@ -386,7 +387,7 @@ retry:
#ifdef CONFIG_XFS_RT
STATIC int
xchk_fscount_add_frextent(
	struct xfs_mount	*mp,
	struct xfs_rtgroup	*rtg,
	struct xfs_trans	*tp,
	const struct xfs_rtalloc_rec *rec,
	void			*priv)
@ -407,6 +408,7 @@ xchk_fscount_count_frextents(
	struct xchk_fscounters	*fsc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_rtgroup	*rtg = NULL;
	int			error;

	fsc->frextents = 0;
@ -414,19 +416,20 @@ xchk_fscount_count_frextents(
	if (!xfs_has_realtime(mp))
		return 0;

	xfs_rtbitmap_lock_shared(sc->mp, XFS_RBMLOCK_BITMAP);
	error = xfs_rtalloc_query_all(sc->mp, sc->tp,
			xchk_fscount_add_frextent, fsc);
	if (error) {
		xchk_set_incomplete(sc);
		goto out_unlock;
	while ((rtg = xfs_rtgroup_next(mp, rtg))) {
		xfs_rtgroup_lock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
		error = xfs_rtalloc_query_all(rtg, sc->tp,
				xchk_fscount_add_frextent, fsc);
		xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
		if (error) {
			xchk_set_incomplete(sc);
			xfs_rtgroup_rele(rtg);
			return error;
		}
	}

	fsc->frextents_delayed = percpu_counter_sum(&mp->m_delalloc_rtextents);

out_unlock:
	xfs_rtbitmap_unlock_shared(sc->mp, XFS_RBMLOCK_BITMAP);
	return error;
	return 0;
}
#else
STATIC int

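The fscounters change above replaces the single global rtbitmap lock with a walk of every realtime group. A minimal sketch of that iteration idiom follows, assuming the reference conventions shown in this series: xfs_rtgroup_next() hands back each group with a reference held and releases the previous one, so an early exit has to drop the current reference itself. The walker name and callback are hypothetical.

static int
example_walk_rtgroups(
	struct xfs_mount	*mp,
	int			(*fn)(struct xfs_rtgroup *rtg))
{
	struct xfs_rtgroup	*rtg = NULL;
	int			error;

	while ((rtg = xfs_rtgroup_next(mp, rtg))) {
		xfs_rtgroup_lock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
		error = fn(rtg);
		xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
		if (error) {
			/* the iterator holds a ref; drop it on early exit */
			xfs_rtgroup_rele(rtg);
			return error;
		}
	}
	return 0;
}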
@ -21,6 +21,7 @@
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_rtbitmap.h"
#include "xfs_extent_busy.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
@ -952,6 +953,29 @@ xrep_ag_init(
	return 0;
}

#ifdef CONFIG_XFS_RT
/*
 * Given a reference to a rtgroup structure, lock rtgroup btree inodes and
 * create btree cursors. Must only be called to repair a regular rt file.
 */
int
xrep_rtgroup_init(
	struct xfs_scrub	*sc,
	struct xfs_rtgroup	*rtg,
	struct xchk_rt		*sr,
	unsigned int		rtglock_flags)
{
	ASSERT(sr->rtg == NULL);

	xfs_rtgroup_lock(rtg, rtglock_flags);
	sr->rtlock_flags = rtglock_flags;

	/* Grab our own passive reference from the caller's ref. */
	sr->rtg = xfs_rtgroup_hold(rtg);
	return 0;
}
#endif /* CONFIG_XFS_RT */

/* Reinitialize the per-AG block reservation for the AG we just fixed. */
int
xrep_reset_perag_resv(
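A small hedged sketch of the repair-side usage (the caller name is hypothetical): the caller already holds an active rtgroup reference, and xrep_rtgroup_init() takes the requested locks plus a passive hold of its own, so the normal scrub teardown path can release sc->sr afterwards.

static int
xrep_example_rt(
	struct xfs_scrub	*sc,
	struct xfs_rtgroup	*rtg)
{
	int			error;

	error = xrep_rtgroup_init(sc, rtg, &sc->sr, XFS_RTGLOCK_BITMAP);
	if (error)
		return error;

	/* ... rebuild rt metadata under the bitmap lock ... */
	return 0;	/* xchk_rtgroup_free() runs from teardown */
}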
@ -8,6 +8,7 @@

#include "xfs_quota_defs.h"

struct xfs_rtgroup;
struct xchk_stats_run;

static inline int xrep_notsupported(struct xfs_scrub *sc)
@ -106,6 +107,12 @@ int xrep_setup_inode(struct xfs_scrub *sc, const struct xfs_imap *imap);
void xrep_ag_btcur_init(struct xfs_scrub *sc, struct xchk_ag *sa);
int xrep_ag_init(struct xfs_scrub *sc, struct xfs_perag *pag,
		struct xchk_ag *sa);
#ifdef CONFIG_XFS_RT
int xrep_rtgroup_init(struct xfs_scrub *sc, struct xfs_rtgroup *rtg,
		struct xchk_rt *sr, unsigned int rtglock_flags);
#else
# define xrep_rtgroup_init(sc, rtg, sr, lockflags)	(-ENOSYS)
#endif /* CONFIG_XFS_RT */

/* Metadata revalidators */

@ -35,6 +35,10 @@ xchk_setup_rtbitmap(
		return -ENOMEM;
	sc->buf = rtb;

	error = xchk_rtgroup_init(sc, sc->sm->sm_agno, &sc->sr);
	if (error)
		return error;

	if (xchk_could_repair(sc)) {
		error = xrep_setup_rtbitmap(sc, rtb);
		if (error)
@ -45,7 +49,8 @@ xchk_setup_rtbitmap(
	if (error)
		return error;

	error = xchk_install_live_inode(sc, sc->mp->m_rbmip);
	error = xchk_install_live_inode(sc,
			sc->sr.rtg->rtg_inodes[XFS_RTGI_BITMAP]);
	if (error)
		return error;

@ -53,18 +58,18 @@ xchk_setup_rtbitmap(
	if (error)
		return error;

	xchk_ilock(sc, XFS_ILOCK_EXCL | XFS_ILOCK_RTBITMAP);

	/*
	 * Now that we've locked the rtbitmap, we can't race with growfsrt
	 * trying to expand the bitmap or change the size of the rt volume.
	 * Hence it is safe to compute and check the geometry values.
	 */
	xchk_rtgroup_lock(&sc->sr, XFS_RTGLOCK_BITMAP);
	if (mp->m_sb.sb_rblocks) {
		rtb->rextents = xfs_rtb_to_rtx(mp, mp->m_sb.sb_rblocks);
		rtb->rextents = xfs_blen_to_rtbxlen(mp, mp->m_sb.sb_rblocks);
		rtb->rextslog = xfs_compute_rextslog(rtb->rextents);
		rtb->rbmblocks = xfs_rtbitmap_blockcount(mp, rtb->rextents);
		rtb->rbmblocks = xfs_rtbitmap_blockcount(mp);
	}

	return 0;
}

@ -73,7 +78,7 @@ xchk_setup_rtbitmap(
/* Scrub a free extent record from the realtime bitmap. */
STATIC int
xchk_rtbitmap_rec(
	struct xfs_mount	*mp,
	struct xfs_rtgroup	*rtg,
	struct xfs_trans	*tp,
	const struct xfs_rtalloc_rec *rec,
	void			*priv)
@ -82,10 +87,10 @@ xchk_rtbitmap_rec(
	xfs_rtblock_t		startblock;
	xfs_filblks_t		blockcount;

	startblock = xfs_rtx_to_rtb(mp, rec->ar_startext);
	blockcount = xfs_rtx_to_rtb(mp, rec->ar_extcount);
	startblock = xfs_rtx_to_rtb(rtg, rec->ar_startext);
	blockcount = xfs_rtxlen_to_extlen(rtg_mount(rtg), rec->ar_extcount);

	if (!xfs_verify_rtbext(mp, startblock, blockcount))
	if (!xfs_verify_rtbext(rtg_mount(rtg), startblock, blockcount))
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
	return 0;
}
@ -140,18 +145,20 @@ xchk_rtbitmap(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_rtgroup	*rtg = sc->sr.rtg;
	struct xfs_inode	*rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP];
	struct xchk_rtbitmap	*rtb = sc->buf;
	int			error;

	/* Is sb_rextents correct? */
	if (mp->m_sb.sb_rextents != rtb->rextents) {
		xchk_ino_set_corrupt(sc, mp->m_rbmip->i_ino);
		xchk_ino_set_corrupt(sc, rbmip->i_ino);
		return 0;
	}

	/* Is sb_rextslog correct? */
	if (mp->m_sb.sb_rextslog != rtb->rextslog) {
		xchk_ino_set_corrupt(sc, mp->m_rbmip->i_ino);
		xchk_ino_set_corrupt(sc, rbmip->i_ino);
		return 0;
	}

@ -160,17 +167,17 @@ xchk_rtbitmap(
	 * case can we exceed 4bn bitmap blocks since the super field is a u32.
	 */
	if (rtb->rbmblocks > U32_MAX) {
		xchk_ino_set_corrupt(sc, mp->m_rbmip->i_ino);
		xchk_ino_set_corrupt(sc, rbmip->i_ino);
		return 0;
	}
	if (mp->m_sb.sb_rbmblocks != rtb->rbmblocks) {
		xchk_ino_set_corrupt(sc, mp->m_rbmip->i_ino);
		xchk_ino_set_corrupt(sc, rbmip->i_ino);
		return 0;
	}

	/* The bitmap file length must be aligned to an fsblock. */
	if (mp->m_rbmip->i_disk_size & mp->m_blockmask) {
		xchk_ino_set_corrupt(sc, mp->m_rbmip->i_ino);
	if (rbmip->i_disk_size & mp->m_blockmask) {
		xchk_ino_set_corrupt(sc, rbmip->i_ino);
		return 0;
	}

@ -179,8 +186,8 @@ xchk_rtbitmap(
	 * growfsrt expands the bitmap file before updating sb_rextents, so the
	 * file can be larger than sb_rbmblocks.
	 */
	if (mp->m_rbmip->i_disk_size < XFS_FSB_TO_B(mp, rtb->rbmblocks)) {
		xchk_ino_set_corrupt(sc, mp->m_rbmip->i_ino);
	if (rbmip->i_disk_size < XFS_FSB_TO_B(mp, rtb->rbmblocks)) {
		xchk_ino_set_corrupt(sc, rbmip->i_ino);
		return 0;
	}

@ -193,7 +200,7 @@ xchk_rtbitmap(
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	error = xfs_rtalloc_query_all(mp, sc->tp, xchk_rtbitmap_rec, sc);
	error = xfs_rtalloc_query_all(rtg, sc->tp, xchk_rtbitmap_rec, sc);
	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
		return error;

@ -207,6 +214,8 @@ xchk_xref_is_used_rt_space(
	xfs_rtblock_t		rtbno,
	xfs_extlen_t		len)
{
	struct xfs_rtgroup	*rtg = sc->sr.rtg;
	struct xfs_inode	*rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP];
	xfs_rtxnum_t		startext;
	xfs_rtxnum_t		endext;
	bool			is_free;
@ -217,13 +226,10 @@ xchk_xref_is_used_rt_space(

	startext = xfs_rtb_to_rtx(sc->mp, rtbno);
	endext = xfs_rtb_to_rtx(sc->mp, rtbno + len - 1);
	xfs_ilock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
	error = xfs_rtalloc_extent_is_free(sc->mp, sc->tp, startext,
	error = xfs_rtalloc_extent_is_free(rtg, sc->tp, startext,
			endext - startext + 1, &is_free);
	if (!xchk_should_check_xref(sc, &error, NULL))
		goto out_unlock;
		return;
	if (is_free)
		xchk_ino_xref_set_corrupt(sc, sc->mp->m_rbmip->i_ino);
out_unlock:
	xfs_iunlock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
		xchk_ino_xref_set_corrupt(sc, rbmip->i_ino);
}
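The rtbitmap scrubber above switches from mount-based conversions to the group-aware helpers used throughout this series: xfs_rtx_to_rtb() now takes an rtgroup so that a starting extent number resolves to a position inside that group, while a pure extent count is scaled through xfs_rtxlen_to_extlen(). A tiny hedged sketch of the two conversions, assuming a 4-block rt extent size purely for illustration:

/* Sketch only; the 4-block extent size and the numbers are made up. */
static inline xfs_rtblock_t
example_rtx_conversions(
	struct xfs_rtgroup	*rtg,
	xfs_rtxnum_t		rtx,
	xfs_rtxlen_t		nrtx,
	xfs_filblks_t		*len)
{
	/* length: extents -> blocks, no group component (8 rtx -> 32 blocks) */
	*len = xfs_rtxlen_to_extlen(rtg_mount(rtg), nrtx);

	/* position: extent number -> rt block address within this rtgroup */
	return xfs_rtx_to_rtb(rtg, rtx);
}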
@ -18,6 +18,7 @@
#include "xfs_bmap.h"
#include "xfs_sb.h"
#include "xfs_exchmaps.h"
#include "xfs_rtgroup.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
@ -46,12 +47,19 @@ xchk_setup_rtsummary(
	struct xchk_rtsummary	*rts;
	int			error;

	if (xchk_need_intent_drain(sc))
		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);

	rts = kvzalloc(struct_size(rts, words, mp->m_blockwsize),
			XCHK_GFP_FLAGS);
	if (!rts)
		return -ENOMEM;
	sc->buf = rts;

	error = xchk_rtgroup_init(sc, sc->sm->sm_agno, &sc->sr);
	if (error)
		return error;

	if (xchk_could_repair(sc)) {
		error = xrep_setup_rtsummary(sc, rts);
		if (error)
@ -73,7 +81,8 @@ xchk_setup_rtsummary(
	if (error)
		return error;

	error = xchk_install_live_inode(sc, mp->m_rsumip);
	error = xchk_install_live_inode(sc,
			sc->sr.rtg->rtg_inodes[XFS_RTGI_SUMMARY]);
	if (error)
		return error;

@ -81,30 +90,24 @@ xchk_setup_rtsummary(
	if (error)
		return error;

	/*
	 * Locking order requires us to take the rtbitmap first. We must be
	 * careful to unlock it ourselves when we are done with the rtbitmap
	 * file since the scrub infrastructure won't do that for us. Only
	 * then we can lock the rtsummary inode.
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
	xchk_ilock(sc, XFS_ILOCK_EXCL | XFS_ILOCK_RTSUM);

	/*
	 * Now that we've locked the rtbitmap and rtsummary, we can't race with
	 * growfsrt trying to expand the summary or change the size of the rt
	 * volume. Hence it is safe to compute and check the geometry values.
	 *
	 * Note that there is no strict requirement for an exclusive lock on the
	 * summary here, but to keep the locking APIs simple we lock both inodes
	 * exclusively here. If we ever start caring about running concurrent
	 * fsmap with scrub this could be changed.
	 */
	xchk_rtgroup_lock(&sc->sr, XFS_RTGLOCK_BITMAP);
	if (mp->m_sb.sb_rblocks) {
		int		rextslog;

		rts->rextents = xfs_rtb_to_rtx(mp, mp->m_sb.sb_rblocks);
		rextslog = xfs_compute_rextslog(rts->rextents);
		rts->rsumlevels = rextslog + 1;
		rts->rbmblocks = xfs_rtbitmap_blockcount(mp, rts->rextents);
		rts->rsumblocks = xfs_rtsummary_blockcount(mp, rts->rsumlevels,
				rts->rbmblocks);
		rts->rextents = xfs_blen_to_rtbxlen(mp, mp->m_sb.sb_rblocks);
		rts->rbmblocks = xfs_rtbitmap_blockcount(mp);
		rts->rsumblocks =
			xfs_rtsummary_blockcount(mp, &rts->rsumlevels);
	}

	return 0;
}

@ -155,11 +158,12 @@ xchk_rtsum_inc(
/* Update the summary file to reflect the free extent that we've accumulated. */
STATIC int
xchk_rtsum_record_free(
	struct xfs_mount	*mp,
	struct xfs_rtgroup	*rtg,
	struct xfs_trans	*tp,
	const struct xfs_rtalloc_rec *rec,
	void			*priv)
{
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_scrub	*sc = priv;
	xfs_fileoff_t		rbmoff;
	xfs_rtblock_t		rtbno;
@ -178,11 +182,12 @@ xchk_rtsum_record_free(
	lenlog = xfs_highbit64(rec->ar_extcount);
	offs = xfs_rtsumoffs(mp, lenlog, rbmoff);

	rtbno = xfs_rtx_to_rtb(mp, rec->ar_startext);
	rtlen = xfs_rtx_to_rtb(mp, rec->ar_extcount);
	rtbno = xfs_rtx_to_rtb(rtg, rec->ar_startext);
	rtlen = xfs_rtxlen_to_extlen(mp, rec->ar_extcount);

	if (!xfs_verify_rtbext(mp, rtbno, rtlen)) {
		xchk_ino_xref_set_corrupt(sc, mp->m_rbmip->i_ino);
		xchk_ino_xref_set_corrupt(sc,
				rtg->rtg_inodes[XFS_RTGI_BITMAP]->i_ino);
		return -EFSCORRUPTED;
	}

@ -204,15 +209,14 @@ xchk_rtsum_compute(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	unsigned long long	rtbmp_blocks;
	struct xfs_rtgroup	*rtg = sc->sr.rtg;

	/* If the bitmap size doesn't match the computed size, bail. */
	rtbmp_blocks = xfs_rtbitmap_blockcount(mp, mp->m_sb.sb_rextents);
	if (XFS_FSB_TO_B(mp, rtbmp_blocks) != mp->m_rbmip->i_disk_size)
	if (XFS_FSB_TO_B(mp, xfs_rtbitmap_blockcount(mp)) !=
			rtg->rtg_inodes[XFS_RTGI_BITMAP]->i_disk_size)
		return -EFSCORRUPTED;

	return xfs_rtalloc_query_all(sc->mp, sc->tp, xchk_rtsum_record_free,
			sc);
	return xfs_rtalloc_query_all(rtg, sc->tp, xchk_rtsum_record_free, sc);
}

/* Compare the rtsummary file against the one we computed. */
@ -231,8 +235,9 @@ xchk_rtsum_compare(
	xfs_rtsumoff_t		sumoff = 0;
	int			error = 0;

	rts->args.mp = sc->mp;
	rts->args.mp = mp;
	rts->args.tp = sc->tp;
	rts->args.rtg = sc->sr.rtg;

	/* Mappings may not cross or lie beyond EOF. */
	endoff = XFS_B_TO_FSB(mp, ip->i_disk_size);
@ -299,31 +304,34 @@ xchk_rtsummary(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_rtgroup	*rtg = sc->sr.rtg;
	struct xfs_inode	*rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP];
	struct xfs_inode	*rsumip = rtg->rtg_inodes[XFS_RTGI_SUMMARY];
	struct xchk_rtsummary	*rts = sc->buf;
	int			error = 0;
	int			error;

	/* Is sb_rextents correct? */
	if (mp->m_sb.sb_rextents != rts->rextents) {
		xchk_ino_set_corrupt(sc, mp->m_rbmip->i_ino);
		goto out_rbm;
		xchk_ino_set_corrupt(sc, rbmip->i_ino);
		return 0;
	}

	/* Is m_rsumlevels correct? */
	if (mp->m_rsumlevels != rts->rsumlevels) {
		xchk_ino_set_corrupt(sc, mp->m_rsumip->i_ino);
		goto out_rbm;
		xchk_ino_set_corrupt(sc, rsumip->i_ino);
		return 0;
	}

	/* Is m_rsumsize correct? */
	if (mp->m_rsumblocks != rts->rsumblocks) {
		xchk_ino_set_corrupt(sc, mp->m_rsumip->i_ino);
		goto out_rbm;
		xchk_ino_set_corrupt(sc, rsumip->i_ino);
		return 0;
	}

	/* The summary file length must be aligned to an fsblock. */
	if (mp->m_rsumip->i_disk_size & mp->m_blockmask) {
		xchk_ino_set_corrupt(sc, mp->m_rsumip->i_ino);
		goto out_rbm;
	if (rsumip->i_disk_size & mp->m_blockmask) {
		xchk_ino_set_corrupt(sc, rsumip->i_ino);
		return 0;
	}

	/*
@ -331,15 +339,15 @@ xchk_rtsummary(
	 * growfsrt expands the summary file before updating sb_rextents, so
	 * the file can be larger than rsumsize.
	 */
	if (mp->m_rsumip->i_disk_size < XFS_FSB_TO_B(mp, rts->rsumblocks)) {
		xchk_ino_set_corrupt(sc, mp->m_rsumip->i_ino);
		goto out_rbm;
	if (rsumip->i_disk_size < XFS_FSB_TO_B(mp, rts->rsumblocks)) {
		xchk_ino_set_corrupt(sc, rsumip->i_ino);
		return 0;
	}

	/* Invoke the fork scrubber. */
	error = xchk_metadata_inode_forks(sc);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		goto out_rbm;
		return error;

	/* Construct the new summary file from the rtbitmap. */
	error = xchk_rtsum_compute(sc);
@ -348,23 +356,12 @@ xchk_rtsummary(
		 * EFSCORRUPTED means the rtbitmap is corrupt, which is an xref
		 * error since we're checking the summary file.
		 */
		xchk_ino_xref_set_corrupt(sc, mp->m_rbmip->i_ino);
		error = 0;
		goto out_rbm;
		xchk_ino_set_corrupt(sc, rbmip->i_ino);
		return 0;
	}
	if (error)
		goto out_rbm;
		return error;

	/* Does the computed summary file match the actual rtsummary file? */
	error = xchk_rtsum_compare(sc);

out_rbm:
	/*
	 * Unlock the rtbitmap since we're done with it. All other writers of
	 * the rt free space metadata grab the bitmap and summary ILOCKs in
	 * that order, so we're still protected against allocation activities
	 * even if we continue on to the repair function.
	 */
	xfs_iunlock(mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
	return error;
	return xchk_rtsum_compare(sc);
}
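The setup and check paths above recompute the expected rt geometry from sb_rblocks and compare it against the superblock and mount fields. A worked example of those relationships, using made-up numbers and assuming a power-of-two extent count so the log2 is exact:

/*
 * Illustrative geometry only: sb_rblocks = 1,048,576 rt blocks with a
 * 4-block rt extent size.
 *
 *   rextents   = 1048576 / 4      = 262144 rt extents
 *   rextslog   = log2(262144)     = 18
 *   rsumlevels = rextslog + 1     = 19 summary levels
 *   rbmblocks  = 262144 bitmap bits packed into sb_blocksize-sized blocks
 *   rsumblocks = rsumlevels * rbmblocks summary words, again packed
 *                into filesystem blocks
 */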
@ -76,8 +76,9 @@ xrep_rtsummary_prep_buf(
	union xfs_suminfo_raw	*ondisk;
	int			error;

	rts->args.mp = sc->mp;
	rts->args.mp = mp;
	rts->args.tp = sc->tp;
	rts->args.rtg = sc->sr.rtg;
	rts->args.sumbp = bp;
	ondisk = xfs_rsumblock_infoptr(&rts->args, 0);
	rts->args.sumbp = NULL;
@ -162,8 +163,8 @@ xrep_rtsummary(
		return error;

	/* Reset incore state and blow out the summary cache. */
	if (mp->m_rsum_cache)
		memset(mp->m_rsum_cache, 0xFF, mp->m_sb.sb_rbmblocks);
	if (sc->sr.rtg->rtg_rsum_cache)
		memset(sc->sr.rtg->rtg_rsum_cache, 0xFF, mp->m_sb.sb_rbmblocks);

	mp->m_rsumlevels = rts->rsumlevels;
	mp->m_rsumblocks = rts->rsumblocks;
@ -225,6 +225,8 @@ xchk_teardown(
		xfs_trans_cancel(sc->tp);
		sc->tp = NULL;
	}
	if (sc->sr.rtg)
		xchk_rtgroup_free(sc, &sc->sr);
	if (sc->ip) {
		if (sc->ilock_flags)
			xchk_iunlock(sc, sc->ilock_flags);
@ -382,13 +384,13 @@ static const struct xchk_meta_ops meta_scrub_ops[] = {
		.repair	= xrep_parent,
	},
	[XFS_SCRUB_TYPE_RTBITMAP] = {	/* realtime bitmap */
		.type	= ST_FS,
		.type	= ST_RTGROUP,
		.setup	= xchk_setup_rtbitmap,
		.scrub	= xchk_rtbitmap,
		.repair	= xrep_rtbitmap,
	},
	[XFS_SCRUB_TYPE_RTSUM] = {	/* realtime summary */
		.type	= ST_FS,
		.type	= ST_RTGROUP,
		.setup	= xchk_setup_rtsummary,
		.scrub	= xchk_rtsummary,
		.repair	= xrep_rtsummary,
@ -498,6 +500,33 @@ xchk_validate_inputs(
		break;
	case ST_GENERIC:
		break;
	case ST_RTGROUP:
		if (sm->sm_ino || sm->sm_gen)
			goto out;
		if (xfs_has_rtgroups(mp)) {
			/*
			 * On a rtgroups filesystem, there won't be an rtbitmap
			 * or rtsummary file for group 0 unless there's
			 * actually a realtime volume attached. However, older
			 * xfs_scrub always calls the rtbitmap/rtsummary
			 * scrubbers with sm_agno==0 so transform the error
			 * code to ENOENT.
			 */
			if (sm->sm_agno >= mp->m_sb.sb_rgcount) {
				if (sm->sm_agno == 0)
					error = -ENOENT;
				goto out;
			}
		} else {
			/*
			 * Prior to rtgroups, the rtbitmap/rtsummary scrubbers
			 * accepted sm_agno==0, so we still accept that for
			 * scrubbing pre-rtgroups filesystems.
			 */
			if (sm->sm_agno != 0)
				goto out;
		}
		break;
	default:
		goto out;
	}
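For reference, the ST_RTGROUP acceptance rules above condense to a small predicate. The helper below is hypothetical and omits the sm_ino/sm_gen check; it only restates the sm_agno handling shown in the hunk.

static bool
example_rtgroup_agno_ok(
	struct xfs_mount	*mp,
	uint32_t		agno,
	int			*error)
{
	if (!xfs_has_rtgroups(mp))
		return agno == 0;	/* pre-rtgroups: only group 0 */
	if (agno < mp->m_sb.sb_rgcount)
		return true;		/* a real rtgroup */
	if (agno == 0)
		*error = -ENOENT;	/* old xfs_scrub, no rt volume */
	return false;
}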
@ -74,6 +74,7 @@ enum xchk_type {
	ST_FS,		/* per-FS metadata */
	ST_INODE,	/* per-inode metadata */
	ST_GENERIC,	/* determined by the scrubber */
	ST_RTGROUP,	/* rtgroup metadata */
};

struct xchk_meta_ops {
@ -118,6 +119,15 @@ struct xchk_ag {
	struct xfs_btree_cur	*refc_cur;
};

/* Inode lock state for the RT volume. */
struct xchk_rt {
	/* incore rtgroup, if applicable */
	struct xfs_rtgroup	*rtg;

	/* XFS_RTGLOCK_* lock state if locked */
	unsigned int		rtlock_flags;
};

struct xfs_scrub {
	/* General scrub state. */
	struct xfs_mount	*mp;
@ -179,6 +189,9 @@ struct xfs_scrub {

	/* State tracking for single-AG operations. */
	struct xchk_ag		sa;

	/* State tracking for realtime operations. */
	struct xchk_rt		sr;
};

/* XCHK state flags grow up from zero, XREP state flags grown down from 2^31 */

@ -29,6 +29,7 @@
#include "xfs_iomap.h"
#include "xfs_reflink.h"
#include "xfs_rtbitmap.h"
#include "xfs_rtgroup.h"

/* Kernel only BMAP related definitions and functions */

@ -41,7 +42,7 @@ xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	if (XFS_IS_REALTIME_INODE(ip))
		return XFS_FSB_TO_BB(ip->i_mount, fsb);
		return xfs_rtb_to_daddr(ip->i_mount, fsb);
	return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
}

@ -25,6 +25,7 @@
#include "xfs_alloc.h"
#include "xfs_ag.h"
#include "xfs_sb.h"
#include "xfs_rtgroup.h"

/*
 * This is the number of entries in the l_buf_cancel_table used during
@ -704,6 +705,7 @@ xlog_recover_do_primary_sb_buffer(
{
	struct xfs_dsb		*dsb = bp->b_addr;
	xfs_agnumber_t		orig_agcount = mp->m_sb.sb_agcount;
	xfs_rgnumber_t		orig_rgcount = mp->m_sb.sb_rgcount;
	int			error;

	xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
@ -722,6 +724,11 @@
		xfs_alert(mp, "Shrinking AG count in log recovery not supported");
		return -EFSCORRUPTED;
	}
	if (mp->m_sb.sb_rgcount < orig_rgcount) {
		xfs_warn(mp,
	"Shrinking rtgroup count in log recovery not supported");
		return -EFSCORRUPTED;
	}

	/*
	 * If the last AG was grown or shrunk, we also need to update the
@ -731,6 +738,17 @@
	if (error)
		return error;

	/*
	 * If the last rtgroup was grown or shrunk, we also need to update the
	 * length in the in-core rtgroup structure and values depending on it.
	 * Ignore this on any filesystem with zero rtgroups.
	 */
	if (orig_rgcount > 0) {
		error = xfs_update_last_rtgroup_size(mp, orig_rgcount);
		if (error)
			return error;
	}

	/*
	 * Initialize the new perags, and also update various block and inode
	 * allocator setting based off the number of AGs or total blocks.
@ -744,6 +762,13 @@
		return error;
	}
	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);

	error = xfs_initialize_rtgroups(mp, orig_rgcount, mp->m_sb.sb_rgcount,
			mp->m_sb.sb_rextents);
	if (error) {
		xfs_warn(mp, "Failed recovery rtgroup init: %d", error);
		return error;
	}
	return 0;
}

@ -21,6 +21,7 @@
#include "xfs_ag.h"
#include "xfs_health.h"
#include "xfs_rtbitmap.h"
#include "xfs_rtgroup.h"

/*
 * Notes on an efficient, low latency fstrim algorithm
@ -506,7 +507,7 @@ xfs_discard_rtdev_extents(

static int
xfs_trim_gather_rtextent(
	struct xfs_mount	*mp,
	struct xfs_rtgroup	*rtg,
	struct xfs_trans	*tp,
	const struct xfs_rtalloc_rec *rec,
	void			*priv)
@ -525,12 +526,12 @@ xfs_trim_gather_rtextent(
		return -ECANCELED;
	}

	rbno = xfs_rtx_to_rtb(mp, rec->ar_startext);
	rlen = xfs_rtx_to_rtb(mp, rec->ar_extcount);
	rbno = xfs_rtx_to_rtb(rtg, rec->ar_startext);
	rlen = xfs_rtbxlen_to_blen(rtg_mount(rtg), rec->ar_extcount);

	/* Ignore too small. */
	if (rlen < tr->minlen_fsb) {
		trace_xfs_discard_rttoosmall(mp, rbno, rlen);
		trace_xfs_discard_rttoosmall(rtg_mount(rtg), rbno, rlen);
		return 0;
	}

@ -548,69 +549,49 @@
}

static int
xfs_trim_rtdev_extents(
	struct xfs_mount	*mp,
	xfs_daddr_t		start,
	xfs_daddr_t		end,
xfs_trim_rtextents(
	struct xfs_rtgroup	*rtg,
	xfs_rtxnum_t		low,
	xfs_rtxnum_t		high,
	xfs_daddr_t		minlen)
{
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_trim_rtdev	tr = {
		.minlen_fsb	= XFS_BB_TO_FSB(mp, minlen),
		.extent_list	= LIST_HEAD_INIT(tr.extent_list),
	};
	xfs_rtxnum_t		low, high;
	struct xfs_trans	*tp;
	xfs_daddr_t		rtdev_daddr;
	int			error;

	INIT_LIST_HEAD(&tr.extent_list);

	/* Shift the start and end downwards to match the rt device. */
	rtdev_daddr = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (start > rtdev_daddr)
		start -= rtdev_daddr;
	else
		start = 0;

	if (end <= rtdev_daddr)
		return 0;
	end -= rtdev_daddr;

	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	end = min_t(xfs_daddr_t, end,
			XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks) - 1);

	/* Convert the rt blocks to rt extents */
	low = xfs_rtb_to_rtxup(mp, XFS_BB_TO_FSB(mp, start));
	high = xfs_rtb_to_rtx(mp, XFS_BB_TO_FSBT(mp, end));

	/*
	 * Walk the free ranges between low and high. The query_range function
	 * trims the extents returned.
	 */
	do {
		tr.stop_rtx = low + (mp->m_sb.sb_blocksize * NBBY);
		xfs_rtbitmap_lock_shared(mp, XFS_RBMLOCK_BITMAP);
		error = xfs_rtalloc_query_range(mp, tp, low, high,
		xfs_rtgroup_lock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
		error = xfs_rtalloc_query_range(rtg, tp, low, high,
				xfs_trim_gather_rtextent, &tr);

		if (error == -ECANCELED)
			error = 0;
		if (error) {
			xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
			xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
			xfs_discard_free_rtdev_extents(&tr);
			break;
		}

		if (list_empty(&tr.extent_list)) {
			xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
			xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
			break;
		}

		error = xfs_discard_rtdev_extents(mp, &tr);
		xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
		xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
		if (error)
			break;

@ -620,6 +601,55 @@ xfs_trim_rtdev_extents(
	xfs_trans_cancel(tp);
	return error;
}

static int
xfs_trim_rtdev_extents(
	struct xfs_mount	*mp,
	xfs_daddr_t		start,
	xfs_daddr_t		end,
	xfs_daddr_t		minlen)
{
	xfs_rtblock_t		start_rtbno, end_rtbno;
	xfs_rtxnum_t		start_rtx, end_rtx;
	xfs_rgnumber_t		start_rgno, end_rgno;
	int			last_error = 0, error;
	struct xfs_rtgroup	*rtg = NULL;

	/* Shift the start and end downwards to match the rt device. */
	start_rtbno = xfs_daddr_to_rtb(mp, start);
	if (start_rtbno > mp->m_sb.sb_dblocks)
		start_rtbno -= mp->m_sb.sb_dblocks;
	else
		start_rtbno = 0;
	start_rtx = xfs_rtb_to_rtx(mp, start_rtbno);
	start_rgno = xfs_rtb_to_rgno(mp, start_rtbno);

	end_rtbno = xfs_daddr_to_rtb(mp, end);
	if (end_rtbno <= mp->m_sb.sb_dblocks)
		return 0;
	end_rtbno -= mp->m_sb.sb_dblocks;
	end_rtx = xfs_rtb_to_rtx(mp, end_rtbno + mp->m_sb.sb_rextsize - 1);
	end_rgno = xfs_rtb_to_rgno(mp, end_rtbno);

	while ((rtg = xfs_rtgroup_next_range(mp, rtg, start_rgno, end_rgno))) {
		xfs_rtxnum_t	rtg_end = rtg->rtg_extents;

		if (rtg_rgno(rtg) == end_rgno)
			rtg_end = min(rtg_end, end_rtx);

		error = xfs_trim_rtextents(rtg, start_rtx, rtg_end, minlen);
		if (error)
			last_error = error;

		if (xfs_trim_should_stop()) {
			xfs_rtgroup_rele(rtg);
			break;
		}
		start_rtx = 0;
	}

	return last_error;
}
#else
# define xfs_trim_rtdev_extents(...)	(-EOPNOTSUPP)
#endif /* CONFIG_XFS_RT */
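To recap the range split in xfs_trim_rtdev_extents() above, here is a hedged worked example; the geometry numbers are made up and only the shape of the walk is taken from the code.

/*
 * Example with a 4-block rt extent size: the incoming daddr range is
 * first shifted down by sb_dblocks, because the FITRIM address space
 * places the rt device after the data device.  A start at rt block 100
 * becomes start_rtx = 100 / 4 = 25, and an end at rt block 2001 rounds
 * up to extent (2001 + 4 - 1) / 4 = 501.  The loop then trims from
 * start_rtx to the end of the first rtgroup, restarts at extent 0 in
 * each later group, and clamps the final group to end_rtx.
 */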
@ -25,6 +25,7 @@
|
||||
#include "xfs_alloc_btree.h"
|
||||
#include "xfs_rtbitmap.h"
|
||||
#include "xfs_ag.h"
|
||||
#include "xfs_rtgroup.h"
|
||||
|
||||
/* Convert an xfs_fsmap to an fsmap. */
|
||||
static void
|
||||
@ -110,18 +111,18 @@ xfs_fsmap_owner_to_rmap(
|
||||
|
||||
/* Convert an rmapbt owner into an fsmap owner. */
|
||||
static int
|
||||
xfs_fsmap_owner_from_rmap(
|
||||
xfs_fsmap_owner_from_frec(
|
||||
struct xfs_fsmap *dest,
|
||||
const struct xfs_rmap_irec *src)
|
||||
const struct xfs_fsmap_irec *frec)
|
||||
{
|
||||
dest->fmr_flags = 0;
|
||||
if (!XFS_RMAP_NON_INODE_OWNER(src->rm_owner)) {
|
||||
dest->fmr_owner = src->rm_owner;
|
||||
if (!XFS_RMAP_NON_INODE_OWNER(frec->owner)) {
|
||||
dest->fmr_owner = frec->owner;
|
||||
return 0;
|
||||
}
|
||||
dest->fmr_flags |= FMR_OF_SPECIAL_OWNER;
|
||||
|
||||
switch (src->rm_owner) {
|
||||
switch (frec->owner) {
|
||||
case XFS_RMAP_OWN_FS:
|
||||
dest->fmr_owner = XFS_FMR_OWN_FS;
|
||||
break;
|
||||
@ -203,7 +204,7 @@ STATIC int
|
||||
xfs_getfsmap_is_shared(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_getfsmap_info *info,
|
||||
const struct xfs_rmap_irec *rec,
|
||||
const struct xfs_fsmap_irec *frec,
|
||||
bool *stat)
|
||||
{
|
||||
struct xfs_mount *mp = tp->t_mountp;
|
||||
@ -224,8 +225,9 @@ xfs_getfsmap_is_shared(
|
||||
cur = xfs_refcountbt_init_cursor(mp, tp, info->agf_bp,
|
||||
to_perag(info->group));
|
||||
|
||||
error = xfs_refcount_find_shared(cur, rec->rm_startblock,
|
||||
rec->rm_blockcount, &fbno, &flen, false);
|
||||
error = xfs_refcount_find_shared(cur, frec->rec_key,
|
||||
XFS_BB_TO_FSBT(mp, frec->len_daddr), &fbno, &flen,
|
||||
false);
|
||||
|
||||
xfs_btree_del_cursor(cur, error);
|
||||
if (error)
|
||||
@ -250,15 +252,22 @@ xfs_getfsmap_format(
|
||||
}
|
||||
|
||||
static inline bool
|
||||
xfs_getfsmap_rec_before_start(
|
||||
xfs_getfsmap_frec_before_start(
|
||||
struct xfs_getfsmap_info *info,
|
||||
const struct xfs_rmap_irec *rec,
|
||||
xfs_daddr_t rec_daddr)
|
||||
const struct xfs_fsmap_irec *frec)
|
||||
{
|
||||
if (info->low_daddr != XFS_BUF_DADDR_NULL)
|
||||
return rec_daddr < info->low_daddr;
|
||||
if (info->low.rm_blockcount)
|
||||
return xfs_rmap_compare(rec, &info->low) < 0;
|
||||
return frec->start_daddr < info->low_daddr;
|
||||
if (info->low.rm_blockcount) {
|
||||
struct xfs_rmap_irec rec = {
|
||||
.rm_startblock = frec->rec_key,
|
||||
.rm_owner = frec->owner,
|
||||
.rm_flags = frec->rm_flags,
|
||||
};
|
||||
|
||||
return xfs_rmap_compare(&rec, &info->low) < 0;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -271,61 +280,36 @@ STATIC int
|
||||
xfs_getfsmap_helper(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_getfsmap_info *info,
|
||||
const struct xfs_rmap_irec *rec,
|
||||
xfs_daddr_t rec_daddr,
|
||||
xfs_daddr_t len_daddr)
|
||||
const struct xfs_fsmap_irec *frec)
|
||||
{
|
||||
struct xfs_fsmap fmr;
|
||||
struct xfs_mount *mp = tp->t_mountp;
|
||||
bool shared;
|
||||
int error;
|
||||
int error = 0;
|
||||
|
||||
if (fatal_signal_pending(current))
|
||||
return -EINTR;
|
||||
|
||||
if (len_daddr == 0)
|
||||
len_daddr = XFS_FSB_TO_BB(mp, rec->rm_blockcount);
|
||||
|
||||
/*
|
||||
* Filter out records that start before our startpoint, if the
|
||||
* caller requested that.
|
||||
*/
|
||||
if (xfs_getfsmap_rec_before_start(info, rec, rec_daddr)) {
|
||||
rec_daddr += len_daddr;
|
||||
if (info->next_daddr < rec_daddr)
|
||||
info->next_daddr = rec_daddr;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* For an info->last query, we're looking for a gap between the last
|
||||
* mapping emitted and the high key specified by userspace. If the
|
||||
* user's query spans less than 1 fsblock, then info->high and
|
||||
* info->low will have the same rm_startblock, which causes rec_daddr
|
||||
* and next_daddr to be the same. Therefore, use the end_daddr that
|
||||
* we calculated from userspace's high key to synthesize the record.
|
||||
* Note that if the btree query found a mapping, there won't be a gap.
|
||||
*/
|
||||
if (info->last && info->end_daddr != XFS_BUF_DADDR_NULL)
|
||||
rec_daddr = info->end_daddr;
|
||||
if (xfs_getfsmap_frec_before_start(info, frec))
|
||||
goto out;
|
||||
|
||||
/* Are we just counting mappings? */
|
||||
if (info->head->fmh_count == 0) {
|
||||
if (info->head->fmh_entries == UINT_MAX)
|
||||
return -ECANCELED;
|
||||
|
||||
if (rec_daddr > info->next_daddr)
|
||||
if (frec->start_daddr > info->next_daddr)
|
||||
info->head->fmh_entries++;
|
||||
|
||||
if (info->last)
|
||||
return 0;
|
||||
|
||||
info->head->fmh_entries++;
|
||||
|
||||
rec_daddr += len_daddr;
|
||||
if (info->next_daddr < rec_daddr)
|
||||
info->next_daddr = rec_daddr;
|
||||
return 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -333,7 +317,7 @@ xfs_getfsmap_helper(
|
||||
* then we've found a gap. Report the gap as being owned by
|
||||
* whatever the caller specified is the missing owner.
|
||||
*/
|
||||
if (rec_daddr > info->next_daddr) {
|
||||
if (frec->start_daddr > info->next_daddr) {
|
||||
if (info->head->fmh_entries >= info->head->fmh_count)
|
||||
return -ECANCELED;
|
||||
|
||||
@ -341,7 +325,7 @@ xfs_getfsmap_helper(
|
||||
fmr.fmr_physical = info->next_daddr;
|
||||
fmr.fmr_owner = info->missing_owner;
|
||||
fmr.fmr_offset = 0;
|
||||
fmr.fmr_length = rec_daddr - info->next_daddr;
|
||||
fmr.fmr_length = frec->start_daddr - info->next_daddr;
|
||||
fmr.fmr_flags = FMR_OF_SPECIAL_OWNER;
|
||||
xfs_getfsmap_format(mp, &fmr, info);
|
||||
}
|
||||
@ -355,23 +339,23 @@ xfs_getfsmap_helper(
|
||||
|
||||
trace_xfs_fsmap_mapping(mp, info->dev,
|
||||
info->group ? info->group->xg_gno : NULLAGNUMBER,
|
||||
rec);
|
||||
frec);
|
||||
|
||||
fmr.fmr_device = info->dev;
|
||||
fmr.fmr_physical = rec_daddr;
|
||||
error = xfs_fsmap_owner_from_rmap(&fmr, rec);
|
||||
fmr.fmr_physical = frec->start_daddr;
|
||||
error = xfs_fsmap_owner_from_frec(&fmr, frec);
|
||||
if (error)
|
||||
return error;
|
||||
fmr.fmr_offset = XFS_FSB_TO_BB(mp, rec->rm_offset);
|
||||
fmr.fmr_length = len_daddr;
|
||||
if (rec->rm_flags & XFS_RMAP_UNWRITTEN)
|
||||
fmr.fmr_offset = XFS_FSB_TO_BB(mp, frec->offset);
|
||||
fmr.fmr_length = frec->len_daddr;
|
||||
if (frec->rm_flags & XFS_RMAP_UNWRITTEN)
|
||||
fmr.fmr_flags |= FMR_OF_PREALLOC;
|
||||
if (rec->rm_flags & XFS_RMAP_ATTR_FORK)
|
||||
if (frec->rm_flags & XFS_RMAP_ATTR_FORK)
|
||||
fmr.fmr_flags |= FMR_OF_ATTR_FORK;
|
||||
if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK)
|
||||
if (frec->rm_flags & XFS_RMAP_BMBT_BLOCK)
|
||||
fmr.fmr_flags |= FMR_OF_EXTENT_MAP;
|
||||
if (fmr.fmr_flags == 0) {
|
||||
error = xfs_getfsmap_is_shared(tp, info, rec, &shared);
|
||||
error = xfs_getfsmap_is_shared(tp, info, frec, &shared);
|
||||
if (error)
|
||||
return error;
|
||||
if (shared)
|
||||
@ -380,25 +364,55 @@ xfs_getfsmap_helper(
|
||||
|
||||
xfs_getfsmap_format(mp, &fmr, info);
|
||||
out:
|
||||
rec_daddr += len_daddr;
|
||||
if (info->next_daddr < rec_daddr)
|
||||
info->next_daddr = rec_daddr;
|
||||
info->next_daddr = max(info->next_daddr,
|
||||
frec->start_daddr + frec->len_daddr);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int
|
||||
xfs_getfsmap_group_helper(
|
||||
struct xfs_getfsmap_info *info,
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_group *xg,
|
||||
xfs_agblock_t startblock,
|
||||
xfs_extlen_t blockcount,
|
||||
struct xfs_fsmap_irec *frec)
|
||||
{
|
||||
/*
|
||||
* For an info->last query, we're looking for a gap between the last
|
||||
* mapping emitted and the high key specified by userspace. If the
|
||||
* user's query spans less than 1 fsblock, then info->high and
|
||||
* info->low will have the same rm_startblock, which causes rec_daddr
|
||||
* and next_daddr to be the same. Therefore, use the end_daddr that
|
||||
* we calculated from userspace's high key to synthesize the record.
|
||||
* Note that if the btree query found a mapping, there won't be a gap.
|
||||
*/
|
||||
if (info->last && info->end_daddr != XFS_BUF_DADDR_NULL)
|
||||
frec->start_daddr = info->end_daddr;
|
||||
else
|
||||
frec->start_daddr = xfs_gbno_to_daddr(xg, startblock);
|
||||
|
||||
frec->len_daddr = XFS_FSB_TO_BB(xg->xg_mount, blockcount);
|
||||
return xfs_getfsmap_helper(tp, info, frec);
|
||||
}
|
||||
|
||||
/* Transform a rmapbt irec into a fsmap */
|
||||
STATIC int
|
||||
xfs_getfsmap_datadev_helper(
|
||||
xfs_getfsmap_rmapbt_helper(
|
||||
struct xfs_btree_cur *cur,
|
||||
const struct xfs_rmap_irec *rec,
|
||||
void *priv)
|
||||
{
|
||||
struct xfs_fsmap_irec frec = {
|
||||
.owner = rec->rm_owner,
|
||||
.offset = rec->rm_offset,
|
||||
.rm_flags = rec->rm_flags,
|
||||
.rec_key = rec->rm_startblock,
|
||||
};
|
||||
struct xfs_getfsmap_info *info = priv;
|
||||
|
||||
return xfs_getfsmap_helper(cur->bc_tp, info, rec,
|
||||
xfs_agbno_to_daddr(to_perag(cur->bc_group),
|
||||
rec->rm_startblock),
|
||||
0);
|
||||
return xfs_getfsmap_group_helper(info, cur->bc_tp, cur->bc_group,
|
||||
rec->rm_startblock, rec->rm_blockcount, &frec);
|
||||
}
|
||||
|
||||
/* Transform a bnobt irec into a fsmap */
|
||||
@ -408,19 +422,14 @@ xfs_getfsmap_datadev_bnobt_helper(
|
||||
const struct xfs_alloc_rec_incore *rec,
|
||||
void *priv)
|
||||
{
|
||||
struct xfs_fsmap_irec frec = {
|
||||
.owner = XFS_RMAP_OWN_NULL, /* "free" */
|
||||
.rec_key = rec->ar_startblock,
|
||||
};
|
||||
struct xfs_getfsmap_info *info = priv;
|
||||
struct xfs_rmap_irec irec;
|
||||
|
||||
irec.rm_startblock = rec->ar_startblock;
|
||||
irec.rm_blockcount = rec->ar_blockcount;
|
||||
irec.rm_owner = XFS_RMAP_OWN_NULL; /* "free" */
|
||||
irec.rm_offset = 0;
|
||||
irec.rm_flags = 0;
|
||||
|
||||
return xfs_getfsmap_helper(cur->bc_tp, info, &irec,
|
||||
xfs_agbno_to_daddr(to_perag(cur->bc_group),
|
||||
rec->ar_startblock),
|
||||
0);
|
||||
return xfs_getfsmap_group_helper(info, cur->bc_tp, cur->bc_group,
|
||||
rec->ar_startblock, rec->ar_blockcount, &frec);
|
||||
}
|
||||
|
||||
/* Set rmap flags based on the getfsmap flags */
|
||||
@ -544,9 +553,9 @@ __xfs_getfsmap_datadev(
|
||||
if (error)
|
||||
break;
|
||||
|
||||
trace_xfs_fsmap_low_key(mp, info->dev, pag_agno(pag),
|
||||
trace_xfs_fsmap_low_group_key(mp, info->dev, pag_agno(pag),
|
||||
&info->low);
|
||||
trace_xfs_fsmap_high_key(mp, info->dev, pag_agno(pag),
|
||||
trace_xfs_fsmap_high_group_key(mp, info->dev, pag_agno(pag),
|
||||
&info->high);
|
||||
|
||||
error = query_fn(tp, info, &bt_cur, priv);
|
||||
@ -602,13 +611,13 @@ xfs_getfsmap_datadev_rmapbt_query(
|
||||
{
|
||||
/* Report any gap at the end of the last AG. */
|
||||
if (info->last)
|
||||
return xfs_getfsmap_datadev_helper(*curpp, &info->high, info);
|
||||
return xfs_getfsmap_rmapbt_helper(*curpp, &info->high, info);
|
||||
|
||||
/* Allocate cursor for this AG and query_range it. */
|
||||
*curpp = xfs_rmapbt_init_cursor(tp->t_mountp, tp, info->agf_bp,
|
||||
to_perag(info->group));
|
||||
return xfs_rmap_query_range(*curpp, &info->low, &info->high,
|
||||
xfs_getfsmap_datadev_helper, info);
|
||||
xfs_getfsmap_rmapbt_helper, info);
|
||||
}
|
||||
|
||||
/* Execute a getfsmap query against the regular data device rmapbt. */
|
||||
@ -668,9 +677,12 @@ xfs_getfsmap_logdev(
|
||||
const struct xfs_fsmap *keys,
|
||||
struct xfs_getfsmap_info *info)
|
||||
{
|
||||
struct xfs_fsmap_irec frec = {
|
||||
.start_daddr = 0,
|
||||
.rec_key = 0,
|
||||
.owner = XFS_RMAP_OWN_LOG,
|
||||
};
|
||||
struct xfs_mount *mp = tp->t_mountp;
|
||||
struct xfs_rmap_irec rmap;
|
||||
xfs_daddr_t rec_daddr, len_daddr;
|
||||
xfs_fsblock_t start_fsb, end_fsb;
|
||||
uint64_t eofs;
|
||||
|
||||
@ -685,51 +697,53 @@ xfs_getfsmap_logdev(
|
||||
if (keys[0].fmr_length > 0)
|
||||
info->low_daddr = XFS_FSB_TO_BB(mp, start_fsb);
|
||||
|
||||
trace_xfs_fsmap_low_key_linear(mp, info->dev, start_fsb);
|
||||
trace_xfs_fsmap_high_key_linear(mp, info->dev, end_fsb);
|
||||
trace_xfs_fsmap_low_linear_key(mp, info->dev, start_fsb);
|
||||
trace_xfs_fsmap_high_linear_key(mp, info->dev, end_fsb);
|
||||
|
||||
if (start_fsb > 0)
|
||||
return 0;
|
||||
|
||||
/* Fabricate an rmap entry for the external log device. */
|
||||
rmap.rm_startblock = 0;
|
||||
rmap.rm_blockcount = mp->m_sb.sb_logblocks;
|
||||
rmap.rm_owner = XFS_RMAP_OWN_LOG;
|
||||
rmap.rm_offset = 0;
|
||||
rmap.rm_flags = 0;
|
||||
|
||||
rec_daddr = XFS_FSB_TO_BB(mp, rmap.rm_startblock);
|
||||
len_daddr = XFS_FSB_TO_BB(mp, rmap.rm_blockcount);
|
||||
return xfs_getfsmap_helper(tp, info, &rmap, rec_daddr, len_daddr);
|
||||
frec.len_daddr = XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
|
||||
return xfs_getfsmap_helper(tp, info, &frec);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_XFS_RT
|
||||
/* Transform a rtbitmap "record" into a fsmap */
|
||||
STATIC int
|
||||
xfs_getfsmap_rtdev_rtbitmap_helper(
|
||||
struct xfs_mount *mp,
|
||||
struct xfs_rtgroup *rtg,
|
||||
struct xfs_trans *tp,
|
||||
const struct xfs_rtalloc_rec *rec,
|
||||
void *priv)
|
||||
{
|
||||
struct xfs_fsmap_irec frec = {
|
||||
.owner = XFS_RMAP_OWN_NULL, /* "free" */
|
||||
};
|
||||
struct xfs_mount *mp = rtg_mount(rtg);
|
||||
struct xfs_getfsmap_info *info = priv;
|
||||
struct xfs_rmap_irec irec;
|
||||
xfs_rtblock_t rtbno;
|
||||
xfs_daddr_t rec_daddr, len_daddr;
|
||||
xfs_rtblock_t start_rtb =
|
||||
xfs_rtx_to_rtb(rtg, rec->ar_startext);
|
||||
uint64_t rtbcount =
|
||||
xfs_rtbxlen_to_blen(mp, rec->ar_extcount);
|
||||
|
||||
rtbno = xfs_rtx_to_rtb(mp, rec->ar_startext);
|
||||
rec_daddr = XFS_FSB_TO_BB(mp, rtbno);
|
||||
irec.rm_startblock = rtbno;
|
||||
/*
|
||||
* For an info->last query, we're looking for a gap between the last
|
||||
* mapping emitted and the high key specified by userspace. If the
|
||||
* user's query spans less than 1 fsblock, then info->high and
|
||||
* info->low will have the same rm_startblock, which causes rec_daddr
|
||||
* and next_daddr to be the same. Therefore, use the end_daddr that
|
||||
* we calculated from userspace's high key to synthesize the record.
|
||||
* Note that if the btree query found a mapping, there won't be a gap.
|
||||
*/
|
||||
if (info->last && info->end_daddr != XFS_BUF_DADDR_NULL) {
|
||||
frec.start_daddr = info->end_daddr;
|
||||
} else {
|
||||
frec.start_daddr = xfs_rtb_to_daddr(mp, start_rtb);
|
||||
}
|
||||
|
||||
rtbno = xfs_rtx_to_rtb(mp, rec->ar_extcount);
|
||||
len_daddr = XFS_FSB_TO_BB(mp, rtbno);
|
||||
irec.rm_blockcount = rtbno;
|
||||
|
||||
irec.rm_owner = XFS_RMAP_OWN_NULL; /* "free" */
|
||||
irec.rm_offset = 0;
|
||||
irec.rm_flags = 0;
|
||||
|
||||
return xfs_getfsmap_helper(tp, info, &irec, rec_daddr, len_daddr);
|
||||
frec.len_daddr = XFS_FSB_TO_BB(mp, rtbcount);
|
||||
return xfs_getfsmap_helper(tp, info, &frec);
|
||||
}
|
||||
|
||||
/* Execute a getfsmap query against the realtime device rtbitmap. */
|
||||
@ -739,58 +753,83 @@ xfs_getfsmap_rtdev_rtbitmap(
|
||||
const struct xfs_fsmap *keys,
|
||||
struct xfs_getfsmap_info *info)
|
||||
{
|
||||
|
||||
struct xfs_rtalloc_rec ahigh = { 0 };
|
||||
struct xfs_mount *mp = tp->t_mountp;
|
||||
xfs_rtblock_t start_rtb;
|
||||
xfs_rtblock_t end_rtb;
|
||||
xfs_rtxnum_t high;
|
||||
xfs_rtblock_t start_rtbno, end_rtbno;
|
||||
xfs_rtxnum_t start_rtx, end_rtx;
|
||||
xfs_rgnumber_t start_rgno, end_rgno;
|
||||
struct xfs_rtgroup *rtg = NULL;
|
||||
uint64_t eofs;
|
||||
int error;
|
||||
|
||||
eofs = XFS_FSB_TO_BB(mp, xfs_rtx_to_rtb(mp, mp->m_sb.sb_rextents));
|
||||
eofs = XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks);
|
||||
if (keys[0].fmr_physical >= eofs)
|
||||
return 0;
|
||||
start_rtb = XFS_BB_TO_FSBT(mp,
|
||||
keys[0].fmr_physical + keys[0].fmr_length);
|
||||
end_rtb = XFS_BB_TO_FSB(mp, min(eofs - 1, keys[1].fmr_physical));
|
||||
|
||||
info->missing_owner = XFS_FMR_OWN_UNKNOWN;
|
||||
|
||||
/* Adjust the low key if we are continuing from where we left off. */
|
||||
start_rtbno = xfs_daddr_to_rtb(mp,
|
||||
keys[0].fmr_physical + keys[0].fmr_length);
|
||||
if (keys[0].fmr_length > 0) {
|
||||
info->low_daddr = XFS_FSB_TO_BB(mp, start_rtb);
|
||||
info->low_daddr = xfs_rtb_to_daddr(mp, start_rtbno);
|
||||
if (info->low_daddr >= eofs)
|
||||
return 0;
|
||||
}
|
||||
start_rtx = xfs_rtb_to_rtx(mp, start_rtbno);
|
||||
start_rgno = xfs_rtb_to_rgno(mp, start_rtbno);
|
||||
|
||||
trace_xfs_fsmap_low_key_linear(mp, info->dev, start_rtb);
|
||||
trace_xfs_fsmap_high_key_linear(mp, info->dev, end_rtb);
|
||||
end_rtbno = xfs_daddr_to_rtb(mp, min(eofs - 1, keys[1].fmr_physical));
|
||||
end_rgno = xfs_rtb_to_rgno(mp, end_rtbno);
|
||||
|
||||
xfs_rtbitmap_lock_shared(mp, XFS_RBMLOCK_BITMAP);
|
||||
trace_xfs_fsmap_low_linear_key(mp, info->dev, start_rtbno);
|
||||
trace_xfs_fsmap_high_linear_key(mp, info->dev, end_rtbno);
|
||||
|
||||
/*
|
||||
* Set up query parameters to return free rtextents covering the range
|
||||
* we want.
|
||||
*/
|
||||
high = xfs_rtb_to_rtxup(mp, end_rtb);
|
||||
error = xfs_rtalloc_query_range(mp, tp, xfs_rtb_to_rtx(mp, start_rtb),
|
||||
high, xfs_getfsmap_rtdev_rtbitmap_helper, info);
|
||||
if (error)
|
||||
goto err;
|
||||
end_rtx = -1ULL;
|
||||
|
||||
/*
|
||||
* Report any gaps at the end of the rtbitmap by simulating a null
|
||||
* rmap starting at the block after the end of the query range.
|
||||
*/
|
||||
info->last = true;
|
||||
ahigh.ar_startext = min(mp->m_sb.sb_rextents, high);
|
||||
while ((rtg = xfs_rtgroup_next_range(mp, rtg, start_rgno, end_rgno))) {
|
||||
if (rtg_rgno(rtg) == end_rgno)
|
||||
end_rtx = xfs_rtb_to_rtx(mp,
|
||||
end_rtbno + mp->m_sb.sb_rextsize - 1);
|
||||
|
||||
info->group = rtg_group(rtg);
|
||||
xfs_rtgroup_lock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
|
||||
error = xfs_rtalloc_query_range(rtg, tp, start_rtx, end_rtx,
|
||||
xfs_getfsmap_rtdev_rtbitmap_helper, info);
|
||||
if (error)
|
||||
break;
|
||||
|
||||
/*
|
||||
* Report any gaps at the end of the rtbitmap by simulating a
|
||||
* zero-length free extent starting at the rtx after the end
|
||||
* of the query range.
|
||||
*/
|
||||
if (rtg_rgno(rtg) == end_rgno) {
|
||||
struct xfs_rtalloc_rec ahigh = {
|
||||
.ar_startext = min(end_rtx + 1,
|
||||
rtg->rtg_extents),
|
||||
};
|
||||
|
||||
info->last = true;
|
||||
error = xfs_getfsmap_rtdev_rtbitmap_helper(rtg, tp,
|
||||
&ahigh, info);
|
||||
if (error)
|
||||
break;
|
||||
}
|
||||
|
||||
xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
|
||||
info->group = NULL;
|
||||
start_rtx = 0;
|
||||
}
|
||||
|
||||
/* loop termination case */
|
||||
if (rtg) {
|
||||
if (info->group) {
|
||||
xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
|
||||
info->group = NULL;
|
||||
}
|
||||
xfs_rtgroup_rele(rtg);
|
||||
}
|
||||
|
||||
error = xfs_getfsmap_rtdev_rtbitmap_helper(mp, tp, &ahigh, info);
|
||||
if (error)
|
||||
goto err;
|
||||
err:
|
||||
xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
|
||||
return error;
|
||||
}
|
||||
#endif /* CONFIG_XFS_RT */
|
||||
|
@ -28,6 +28,21 @@ struct xfs_fsmap_head {
|
||||
struct xfs_fsmap fmh_keys[2]; /* low and high keys */
|
||||
};
|
||||
|
||||
/* internal fsmap record format */
|
||||
struct xfs_fsmap_irec {
|
||||
xfs_daddr_t start_daddr;
|
||||
xfs_daddr_t len_daddr;
|
||||
uint64_t owner; /* extent owner */
|
||||
uint64_t offset; /* offset within the owner */
|
||||
unsigned int rm_flags; /* rmap state flags */
|
||||
|
||||
/*
|
||||
* rmapbt startblock corresponding to start_daddr, if the record came
|
||||
* from an rmap btree.
|
||||
*/
|
||||
xfs_agblock_t rec_key;
|
||||
};
|
||||
|
||||
int xfs_ioc_getfsmap(struct xfs_inode *ip, struct fsmap_head __user *arg);
|
||||
|
||||
#endif /* __XFS_FSMAP_H__ */
|
||||
|
@ -342,8 +342,7 @@ xfs_lock_inumorder(
|
||||
{
|
||||
uint class = 0;
|
||||
|
||||
ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
|
||||
XFS_ILOCK_RTSUM)));
|
||||
ASSERT(!(lock_mode & XFS_ILOCK_PARENT));
|
||||
ASSERT(xfs_lockdep_subclass_ok(subclass));
|
||||
|
||||
if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
|
||||
|
@ -448,9 +448,8 @@ static inline bool xfs_inode_has_bigrtalloc(const struct xfs_inode *ip)
|
||||
* However, MAX_LOCKDEP_SUBCLASSES == 8, which means we are greatly
|
||||
* limited to the subclasses we can represent via nesting. We need at least
|
||||
* 5 inodes nest depth for the ILOCK through rename, and we also have to support
|
||||
* XFS_ILOCK_PARENT, which gives 6 subclasses. Then we have XFS_ILOCK_RTBITMAP
|
||||
* and XFS_ILOCK_RTSUM, which are another 2 unique subclasses, so that's all
|
||||
* 8 subclasses supported by lockdep.
|
||||
* XFS_ILOCK_PARENT, which gives 6 subclasses. That's 6 of the 8 subclasses
|
||||
* supported by lockdep.
|
||||
*
|
||||
* This also means we have to number the sub-classes in the lowest bits of
|
||||
* the mask we keep, and we have to ensure we never exceed 3 bits of lockdep
|
||||
@ -476,8 +475,8 @@ static inline bool xfs_inode_has_bigrtalloc(const struct xfs_inode *ip)
|
||||
* ILOCK values
|
||||
* 0-4 subclass values
|
||||
* 5 PARENT subclass (not nestable)
|
||||
* 6 RTBITMAP subclass (not nestable)
|
||||
* 7 RTSUM subclass (not nestable)
|
||||
* 6 unused
|
||||
* 7 unused
|
||||
*
|
||||
*/
|
||||
#define XFS_IOLOCK_SHIFT 16
|
||||
@ -492,12 +491,8 @@ static inline bool xfs_inode_has_bigrtalloc(const struct xfs_inode *ip)
|
||||
#define XFS_ILOCK_SHIFT 24
|
||||
#define XFS_ILOCK_PARENT_VAL 5u
|
||||
#define XFS_ILOCK_MAX_SUBCLASS (XFS_ILOCK_PARENT_VAL - 1)
|
||||
#define XFS_ILOCK_RTBITMAP_VAL 6u
|
||||
#define XFS_ILOCK_RTSUM_VAL 7u
|
||||
#define XFS_ILOCK_DEP_MASK 0xff000000u
|
||||
#define XFS_ILOCK_PARENT (XFS_ILOCK_PARENT_VAL << XFS_ILOCK_SHIFT)
|
||||
#define XFS_ILOCK_RTBITMAP (XFS_ILOCK_RTBITMAP_VAL << XFS_ILOCK_SHIFT)
|
||||
#define XFS_ILOCK_RTSUM (XFS_ILOCK_RTSUM_VAL << XFS_ILOCK_SHIFT)
|
||||
|
||||
#define XFS_LOCK_SUBCLASS_MASK (XFS_IOLOCK_DEP_MASK | \
|
||||
XFS_MMAPLOCK_DEP_MASK | \
|
||||
|
@ -501,8 +501,8 @@ xfs_iomap_prealloc_size(
|
||||
alloc_blocks);
|
||||
|
||||
if (unlikely(XFS_IS_REALTIME_INODE(ip)))
|
||||
freesp = xfs_rtx_to_rtb(mp,
|
||||
xfs_iomap_freesp(&mp->m_frextents,
|
||||
freesp = xfs_rtbxlen_to_blen(mp,
|
||||
xfs_iomap_freesp(&mp->m_frextents,
|
||||
mp->m_low_rtexts, &shift));
|
||||
else
|
||||
freesp = xfs_iomap_freesp(&mp->m_fdblocks, mp->m_low_space,
|
||||
|
@ -36,6 +36,7 @@
|
||||
#include "xfs_ag.h"
|
||||
#include "xfs_rtbitmap.h"
|
||||
#include "xfs_metafile.h"
|
||||
#include "xfs_rtgroup.h"
|
||||
#include "scrub/stats.h"
|
||||
|
||||
static DEFINE_MUTEX(xfs_uuid_table_mutex);
|
||||
@ -834,10 +835,17 @@ xfs_mountfs(
|
||||
goto out_free_dir;
|
||||
}
|
||||
|
||||
error = xfs_initialize_rtgroups(mp, 0, sbp->sb_rgcount,
|
||||
mp->m_sb.sb_rextents);
|
||||
if (error) {
|
||||
xfs_warn(mp, "Failed rtgroup init: %d", error);
|
||||
goto out_free_perag;
|
||||
}
|
||||
|
||||
if (XFS_IS_CORRUPT(mp, !sbp->sb_logblocks)) {
|
||||
xfs_warn(mp, "no log defined");
|
||||
error = -EFSCORRUPTED;
|
||||
goto out_free_perag;
|
||||
goto out_free_rtgroup;
|
||||
}
|
||||
|
||||
error = xfs_inodegc_register_shrinker(mp);
|
||||
@ -1072,6 +1080,8 @@ xfs_mountfs(
|
||||
if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
|
||||
xfs_buftarg_drain(mp->m_logdev_targp);
|
||||
xfs_buftarg_drain(mp->m_ddev_targp);
|
||||
out_free_rtgroup:
|
||||
xfs_free_rtgroups(mp, 0, mp->m_sb.sb_rgcount);
|
||||
out_free_perag:
|
||||
xfs_free_perag_range(mp, 0, mp->m_sb.sb_agcount);
|
||||
out_free_dir:
|
||||
@ -1156,6 +1166,7 @@ xfs_unmountfs(
|
||||
xfs_errortag_clearall(mp);
|
||||
#endif
|
||||
shrinker_free(mp->m_inodegc_shrinker);
|
||||
xfs_free_rtgroups(mp, 0, mp->m_sb.sb_rgcount);
|
||||
xfs_free_perag_range(mp, 0, mp->m_sb.sb_agcount);
|
||||
xfs_errortag_del(mp);
|
||||
xfs_error_sysfs_del(mp);
|
||||
@ -1463,7 +1474,7 @@ xfs_mod_delalloc(
|
||||
|
||||
if (XFS_IS_REALTIME_INODE(ip)) {
|
||||
percpu_counter_add_batch(&mp->m_delalloc_rtextents,
|
||||
xfs_rtb_to_rtx(mp, data_delta),
|
||||
xfs_blen_to_rtbxlen(mp, data_delta),
|
||||
XFS_DELALLOC_BATCH);
|
||||
if (!ind_delta)
|
||||
return;
|
||||
|
@ -124,23 +124,14 @@ typedef struct xfs_mount {
|
||||
struct xfs_da_geometry *m_dir_geo; /* directory block geometry */
|
||||
struct xfs_da_geometry *m_attr_geo; /* attribute block geometry */
|
||||
struct xlog *m_log; /* log specific stuff */
|
||||
struct xfs_inode *m_rbmip; /* pointer to bitmap inode */
|
||||
struct xfs_inode *m_rsumip; /* pointer to summary inode */
|
||||
struct xfs_inode *m_rootip; /* pointer to root directory */
|
||||
struct xfs_inode *m_metadirip; /* ptr to metadata directory */
|
||||
struct xfs_inode *m_rtdirip; /* ptr to realtime metadir */
|
||||
struct xfs_quotainfo *m_quotainfo; /* disk quota information */
|
||||
struct xfs_buftarg *m_ddev_targp; /* data device */
|
||||
struct xfs_buftarg *m_logdev_targp;/* log device */
|
||||
struct xfs_buftarg *m_rtdev_targp; /* rt device */
|
||||
void __percpu *m_inodegc; /* percpu inodegc structures */
|
||||
|
||||
/*
|
||||
* Optional cache of rt summary level per bitmap block with the
|
||||
* invariant that m_rsum_cache[bbno] > the maximum i for which
|
||||
* rsum[i][bbno] != 0, or 0 if rsum[i][bbno] == 0 for all i.
|
||||
* Reads and writes are serialized by the rsumip inode lock.
|
||||
*/
|
||||
uint8_t *m_rsum_cache;
|
||||
struct xfs_mru_cache *m_filestream; /* per-mount filestream data */
|
||||
struct workqueue_struct *m_buf_workqueue;
|
||||
struct workqueue_struct *m_unwritten_workqueue;
|
||||
@ -155,6 +146,7 @@ typedef struct xfs_mount {
|
||||
uint8_t m_agno_log; /* log #ag's */
|
||||
uint8_t m_sectbb_log; /* sectlog - BBSHIFT */
|
||||
int8_t m_rtxblklog; /* log2 of rextsize, if possible */
|
||||
int8_t m_rgblklog; /* log2 of rt group sz if possible */
|
||||
uint m_blockmask; /* sb_blocksize-1 */
|
||||
uint m_blockwsize; /* sb_blocksize in words */
|
||||
uint m_blockwmask; /* blockwsize-1 */
|
||||
@ -181,14 +173,16 @@ typedef struct xfs_mount {
|
||||
uint m_allocsize_blocks; /* min write size blocks */
|
||||
int m_logbufs; /* number of log buffers */
|
||||
int m_logbsize; /* size of each log buffer */
|
||||
uint m_rsumlevels; /* rt summary levels */
|
||||
unsigned int m_rsumlevels; /* rt summary levels */
|
||||
xfs_filblks_t m_rsumblocks; /* size of rt summary, FSBs */
|
||||
uint32_t m_rgblocks; /* size of rtgroup in rtblocks */
|
||||
int m_fixedfsid[2]; /* unchanged for life of FS */
|
||||
uint m_qflags; /* quota status flags */
|
||||
uint64_t m_features; /* active filesystem features */
|
||||
uint64_t m_low_space[XFS_LOWSP_MAX];
|
||||
uint64_t m_low_rtexts[XFS_LOWSP_MAX];
|
||||
uint64_t m_rtxblkmask; /* rt extent block mask */
|
||||
uint64_t m_rgblkmask; /* rt group block mask */
|
||||
struct xfs_ino_geometry m_ino_geo; /* inode geometry */
|
||||
struct xfs_trans_resv m_resv; /* precomputed res values */
|
||||
/* low free space thresholds */
|
||||
@ -391,6 +385,16 @@ __XFS_HAS_FEAT(large_extent_counts, NREXT64)
|
||||
__XFS_HAS_FEAT(exchange_range, EXCHANGE_RANGE)
|
||||
__XFS_HAS_FEAT(metadir, METADIR)
|
||||
|
||||
static inline bool xfs_has_rtgroups(struct xfs_mount *mp)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool xfs_has_rtsb(struct xfs_mount *mp)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Some features are always on for v5 file systems, allow the compiler to
|
||||
* eliminiate dead code when building without v4 support.
|
||||
|
@ -29,6 +29,7 @@
#include "xfs_health.h"
#include "xfs_da_format.h"
#include "xfs_metafile.h"
#include "xfs_rtgroup.h"

/*
 * The global quota manager. There is only one of these for the entire
@ -210,6 +211,21 @@ xfs_qm_unmount(
	}
}

static void
xfs_qm_unmount_rt(
	struct xfs_mount	*mp)
{
	struct xfs_rtgroup	*rtg = xfs_rtgroup_grab(mp, 0);

	if (!rtg)
		return;
	if (rtg->rtg_inodes[XFS_RTGI_BITMAP])
		xfs_qm_dqdetach(rtg->rtg_inodes[XFS_RTGI_BITMAP]);
	if (rtg->rtg_inodes[XFS_RTGI_SUMMARY])
		xfs_qm_dqdetach(rtg->rtg_inodes[XFS_RTGI_SUMMARY]);
	xfs_rtgroup_rele(rtg);
}

/*
 * Called from the vfsops layer.
 */
@ -223,10 +239,13 @@ xfs_qm_unmount_quotas(
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * For pre-RTG file systems, the RT inodes have quotas attached,
	 * detach them now.
	 */
	if (!xfs_has_rtgroups(mp))
		xfs_qm_unmount_rt(mp);

	/*
	 * Release the quota inodes.

@ -27,6 +27,8 @@
#include "xfs_health.h"
#include "xfs_da_format.h"
#include "xfs_metafile.h"
#include "xfs_rtgroup.h"
#include "xfs_error.h"

/*
 * Return whether there are any free extents in the size range given
@ -40,14 +42,14 @@ xfs_rtany_summary(
	xfs_fileoff_t		bbno,	/* bitmap block number */
	int			*maxlog) /* out: max log2 extent size free */
{
	struct xfs_mount	*mp = args->mp;
	uint8_t			*rsum_cache = args->rtg->rtg_rsum_cache;
	int			error;
	int			log;	/* loop counter, log2 of ext. size */
	xfs_suminfo_t		sum;	/* summary data */

	/* There are no extents at levels >= m_rsum_cache[bbno]. */
	if (mp->m_rsum_cache) {
		high = min(high, mp->m_rsum_cache[bbno] - 1);
	/* There are no extents at levels >= rsum_cache[bbno]. */
	if (rsum_cache) {
		high = min(high, rsum_cache[bbno] - 1);
		if (low > high) {
			*maxlog = -1;
			return 0;
@ -79,12 +81,11 @@ xfs_rtany_summary(
	*maxlog = -1;
out:
	/* There were no extents at levels > log. */
	if (mp->m_rsum_cache && log + 1 < mp->m_rsum_cache[bbno])
		mp->m_rsum_cache[bbno] = log + 1;
	if (rsum_cache && log + 1 < rsum_cache[bbno])
		rsum_cache[bbno] = log + 1;
	return 0;
}

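The summary-level cache consulted in xfs_rtany_summary() above is only an in-memory accelerator: for each bitmap block it stores one more than the highest summary level known to contain free extents, so a lookup can clamp its search range and afterwards tighten the cached bound with whatever it learned. The following is a standalone C sketch of that clamp-and-tighten logic; it is illustration only, with invented names, not the kernel routine.

/*
 * Standalone sketch of the rsum-cache logic (not kernel code; every name is
 * invented).  cache[bbno] obeys the invariant quoted from xfs_mount.h above:
 * it is strictly greater than the highest summary level with free extents in
 * that bitmap block, or 0 if there are none.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NLEVELS	8
#define NBLOCKS	4

/* sum[level][bbno] != 0 means a free run of roughly 2^level extents exists */
static int highest_free_level(uint8_t sum[NLEVELS][NBLOCKS], uint8_t *cache,
			      int bbno, int low, int high)
{
	int log;

	/* Invariant: no free extents at any level >= cache[bbno]. */
	if (high > (int)cache[bbno] - 1)
		high = (int)cache[bbno] - 1;

	for (log = high; log >= low; log--)
		if (sum[log][bbno])
			break;			/* found a usable size class */

	/* Every level above 'log' was empty: tighten the cached bound. */
	if (log + 1 < (int)cache[bbno])
		cache[bbno] = log + 1;

	return log >= low ? log : -1;
}

int main(void)
{
	uint8_t sum[NLEVELS][NBLOCKS] = { { 0 } };
	uint8_t cache[NBLOCKS];

	memset(cache, 0xff, sizeof(cache));	/* "anything possible" upper bound */
	sum[3][0] = 1;				/* one free run of ~8 extents */

	printf("%d\n", highest_free_level(sum, cache, 0, 0, NLEVELS - 1)); /* 3 */
	printf("%u\n", cache[0]);		/* tightened from 255 to 4 */
	return 0;
}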
/*
 * Copy and transform the summary file, given the old and new
 * parameters in the mount structures.
@ -151,7 +152,7 @@ xfs_rtallocate_range(
	/*
	 * Find the next allocated block (end of free extent).
	 */
	error = xfs_rtfind_forw(args, end, mp->m_sb.sb_rextents - 1,
	error = xfs_rtfind_forw(args, end, args->rtg->rtg_extents - 1,
			&postblock);
	if (error)
		return error;
@ -213,14 +214,14 @@ xfs_rtalloc_align_len(
 */
static inline xfs_rtxlen_t
xfs_rtallocate_clamp_len(
	struct xfs_mount	*mp,
	struct xfs_rtgroup	*rtg,
	xfs_rtxnum_t		startrtx,
	xfs_rtxlen_t		rtxlen,
	xfs_rtxlen_t		prod)
{
	xfs_rtxlen_t		ret;

	ret = min(mp->m_sb.sb_rextents, startrtx + rtxlen) - startrtx;
	ret = min(rtg->rtg_extents, startrtx + rtxlen) - startrtx;
	return xfs_rtalloc_align_len(ret, prod);
}

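With allocation groups, xfs_rtallocate_clamp_len() clamps a candidate length against the group's extent count instead of the whole volume, then rounds the result down to the extent size hint prod. A small userspace sketch of the same arithmetic, using made-up names, is shown below.

/*
 * Userspace sketch of the clamp-then-align arithmetic; names are invented
 * and this is not the kernel helper itself.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t clamp_len(uint64_t group_extents, uint64_t start,
			  uint64_t len, uint64_t prod)
{
	uint64_t end = start + len;

	if (end > group_extents)	/* never scan past the end of the group */
		end = group_extents;
	len = end - start;
	if (prod > 1)
		len -= len % prod;	/* round down to the allocation unit */
	return len;
}

int main(void)
{
	/* 1000-extent group, 64 extents requested at offset 970, prod = 8 */
	printf("%llu\n", (unsigned long long)clamp_len(1000, 970, 64, 8)); /* 24 */
	return 0;
}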
@ -255,10 +256,11 @@ xfs_rtallocate_extent_block(
	 * Loop over all the extents starting in this bitmap block up to the
	 * end of the rt volume, looking for one that's long enough.
	 */
	end = min(mp->m_sb.sb_rextents, xfs_rbmblock_to_rtx(mp, bbno + 1)) - 1;
	end = min(args->rtg->rtg_extents, xfs_rbmblock_to_rtx(mp, bbno + 1)) -
			1;
	for (i = xfs_rbmblock_to_rtx(mp, bbno); i <= end; i++) {
		/* Make sure we don't scan off the end of the rt volume. */
		scanlen = xfs_rtallocate_clamp_len(mp, i, maxlen, prod);
		scanlen = xfs_rtallocate_clamp_len(args->rtg, i, maxlen, prod);
		if (scanlen < minlen)
			break;

@ -343,7 +345,6 @@ xfs_rtallocate_extent_exact(
	xfs_rtxlen_t		prod,	/* extent product factor */
	xfs_rtxnum_t		*rtx)	/* out: start rtext allocated */
{
	struct xfs_mount	*mp = args->mp;
	xfs_rtxnum_t		next;	/* next rtext to try (dummy) */
	xfs_rtxlen_t		alloclen; /* candidate length */
	xfs_rtxlen_t		scanlen; /* number of free rtx to look for */
@ -354,7 +355,7 @@ xfs_rtallocate_extent_exact(
	ASSERT(maxlen % prod == 0);

	/* Make sure we don't run off the end of the rt volume. */
	scanlen = xfs_rtallocate_clamp_len(mp, start, maxlen, prod);
	scanlen = xfs_rtallocate_clamp_len(args->rtg, start, maxlen, prod);
	if (scanlen < minlen)
		return -ENOSPC;

@ -415,11 +416,10 @@ xfs_rtallocate_extent_near(
	ASSERT(maxlen % prod == 0);

	/*
	 * If the block number given is off the end, silently set it to
	 * the last block.
	 * If the block number given is off the end, silently set it to the last
	 * block.
	 */
	if (start >= mp->m_sb.sb_rextents)
		start = mp->m_sb.sb_rextents - 1;
	start = min(start, args->rtg->rtg_extents - 1);

	/*
	 * Try the exact allocation first.
@ -651,19 +651,30 @@ xfs_rtallocate_extent_size(
	return -ENOSPC;
}

static void
xfs_rtunmount_rtg(
	struct xfs_rtgroup	*rtg)
{
	int			i;

	for (i = 0; i < XFS_RTGI_MAX; i++)
		xfs_rtginode_irele(&rtg->rtg_inodes[i]);
	kvfree(rtg->rtg_rsum_cache);
}

static int
xfs_alloc_rsum_cache(
	struct xfs_mount	*mp,
	struct xfs_rtgroup	*rtg,
	xfs_extlen_t		rbmblocks)
{
	/*
	 * The rsum cache is initialized to the maximum value, which is
	 * trivially an upper bound on the maximum level with any free extents.
	 */
	mp->m_rsum_cache = kvmalloc(rbmblocks, GFP_KERNEL);
	if (!mp->m_rsum_cache)
	rtg->rtg_rsum_cache = kvmalloc(rbmblocks, GFP_KERNEL);
	if (!rtg->rtg_rsum_cache)
		return -ENOMEM;
	memset(mp->m_rsum_cache, -1, rbmblocks);
	memset(rtg->rtg_rsum_cache, -1, rbmblocks);
	return 0;
}

@ -700,44 +711,88 @@ out_iolock:
	return error;
}

/* Ensure that the rtgroup metadata inode is loaded, creating it if needed. */
static int
xfs_rtginode_ensure(
	struct xfs_rtgroup	*rtg,
	enum xfs_rtg_inodes	type)
{
	struct xfs_trans	*tp;
	int			error;

	if (rtg->rtg_inodes[type])
		return 0;

	error = xfs_trans_alloc_empty(rtg_mount(rtg), &tp);
	if (error)
		return error;
	error = xfs_rtginode_load(rtg, type, tp);
	xfs_trans_cancel(tp);

	if (error != -ENOENT)
		return 0;
	return xfs_rtginode_create(rtg, type, true);
}

static struct xfs_mount *
xfs_growfs_rt_alloc_fake_mount(
	const struct xfs_mount	*mp,
	xfs_rfsblock_t		rblocks,
	xfs_agblock_t		rextsize)
{
	struct xfs_mount	*nmp;

	nmp = kmemdup(mp, sizeof(*mp), GFP_KERNEL);
	if (!nmp)
		return NULL;
	nmp->m_sb.sb_rextsize = rextsize;
	xfs_mount_sb_set_rextsize(nmp, &nmp->m_sb);
	nmp->m_sb.sb_rblocks = rblocks;
	nmp->m_sb.sb_rextents = xfs_blen_to_rtbxlen(nmp, nmp->m_sb.sb_rblocks);
	nmp->m_sb.sb_rbmblocks = xfs_rtbitmap_blockcount(nmp);
	nmp->m_sb.sb_rextslog = xfs_compute_rextslog(nmp->m_sb.sb_rextents);
	nmp->m_rsumblocks = xfs_rtsummary_blockcount(nmp, &nmp->m_rsumlevels);

	if (rblocks > 0)
		nmp->m_features |= XFS_FEAT_REALTIME;

	/* recompute growfsrt reservation from new rsumsize */
	xfs_trans_resv_calc(nmp, &nmp->m_resv);
	return nmp;
}

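xfs_growfs_rt_alloc_fake_mount() duplicates the entire mount structure so that the proposed realtime geometry can be computed and examined without touching live state until every step of the grow has been validated. The sketch below shows the same copy-then-recompute-derived-fields pattern in plain C; the struct, fields, and helpers are invented for illustration and are not the XFS API.

/*
 * Illustrative-only sketch of the scratch-copy idea: everything here is
 * hypothetical demo code, not kernel code.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct rt_geom {
	uint64_t rblocks;	/* proposed size of the rt device in fs blocks */
	uint32_t rextsize;	/* proposed fs blocks per rt extent */
	uint64_t rextents;	/* derived: number of rt extents */
	uint32_t rextslog;	/* derived: log2 of rextents (0 if none) */
};

static uint32_t ilog2_u64(uint64_t v)
{
	uint32_t l = 0;

	while (v >>= 1)
		l++;
	return l;
}

/* Copy the live geometry and recompute everything derived from the inputs. */
static struct rt_geom *fake_geom(const struct rt_geom *cur,
				 uint64_t rblocks, uint32_t rextsize)
{
	struct rt_geom *ng = malloc(sizeof(*ng));

	if (!ng)
		return NULL;
	memcpy(ng, cur, sizeof(*ng));
	ng->rblocks = rblocks;
	ng->rextsize = rextsize;
	ng->rextents = rextsize ? rblocks / rextsize : 0;
	ng->rextslog = ng->rextents ? ilog2_u64(ng->rextents) : 0;
	return ng;		/* caller inspects the copy, then free()s it */
}

int main(void)
{
	struct rt_geom cur = { .rblocks = 0, .rextsize = 1 };
	struct rt_geom *ng = fake_geom(&cur, 1u << 20, 4);

	free(ng);		/* the live 'cur' was never modified */
	return 0;
}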
static int
xfs_growfs_rt_bmblock(
	struct xfs_mount	*mp,
	struct xfs_rtgroup	*rtg,
	xfs_rfsblock_t		nrblocks,
	xfs_agblock_t		rextsize,
	xfs_fileoff_t		bmbno)
{
	struct xfs_inode	*rbmip = mp->m_rbmip;
	struct xfs_inode	*rsumip = mp->m_rsumip;
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_inode	*rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP];
	struct xfs_inode	*rsumip = rtg->rtg_inodes[XFS_RTGI_SUMMARY];
	struct xfs_rtalloc_args	args = {
		.mp		= mp,
		.rtg		= rtg,
	};
	struct xfs_rtalloc_args	nargs = {
		.rtg		= rtg,
	};
	struct xfs_mount	*nmp;
	xfs_rfsblock_t		nrblocks_step;
	xfs_rtbxlen_t		freed_rtx;
	int			error;


	nrblocks_step = (bmbno + 1) * NBBY * mp->m_sb.sb_blocksize * rextsize;

	nmp = nargs.mp = kmemdup(mp, sizeof(*mp), GFP_KERNEL);
	if (!nmp)
		return -ENOMEM;

	/*
	 * Calculate new sb and mount fields for this round.
	 */
	nmp->m_sb.sb_rextsize = rextsize;
	xfs_mount_sb_set_rextsize(nmp, &nmp->m_sb);
	nmp->m_sb.sb_rbmblocks = bmbno + 1;
	nmp->m_sb.sb_rblocks = min(nrblocks, nrblocks_step);
	nmp->m_sb.sb_rextents = xfs_rtb_to_rtx(nmp, nmp->m_sb.sb_rblocks);
	nmp->m_sb.sb_rextslog = xfs_compute_rextslog(nmp->m_sb.sb_rextents);
	nmp->m_rsumlevels = nmp->m_sb.sb_rextslog + 1;
	nmp->m_rsumblocks = xfs_rtsummary_blockcount(mp, nmp->m_rsumlevels,
			nmp->m_sb.sb_rbmblocks);
	nrblocks_step = (bmbno + 1) * NBBY * mp->m_sb.sb_blocksize * rextsize;
	nmp = nargs.mp = xfs_growfs_rt_alloc_fake_mount(mp,
			min(nrblocks, nrblocks_step), rextsize);
	if (!nmp)
		return -ENOMEM;

	rtg->rtg_extents = xfs_rtgroup_extents(nmp, rtg_rgno(rtg));

	/*
	 * Recompute the growfsrt reservation from the new rsumsize, so that the
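The nrblocks_step value above drives the one-bitmap-block-at-a-time grow: a single rt bitmap block holds NBBY * sb_blocksize bits, one per rt extent, so after initializing bitmap block bmbno the volume can span at most (bmbno + 1) * NBBY * sb_blocksize * rextsize rt blocks. A standalone illustration of that bound follows (demo code, not the kernel function).

/*
 * Plain C illustration of the nrblocks_step bound; NBBY is bits per byte,
 * everything else here is standalone demo code.
 */
#include <stdint.h>
#include <stdio.h>

#define NBBY 8

static uint64_t rt_blocks_after_step(uint64_t bmbno, uint32_t blocksize,
				     uint32_t rextsize)
{
	/* one bitmap block describes NBBY * blocksize rt extents */
	uint64_t extents_per_bmblock = (uint64_t)NBBY * blocksize;

	return (bmbno + 1) * extents_per_bmblock * rextsize;
}

int main(void)
{
	/* 4096-byte blocks, 4-block extents: step 0 covers 32768 extents */
	printf("%llu rt blocks\n",
	       (unsigned long long)rt_blocks_after_step(0, 4096, 4)); /* 131072 */
	return 0;
}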
@ -750,8 +805,8 @@ xfs_growfs_rt_bmblock(
		goto out_free;
	nargs.tp = args.tp;

	xfs_rtbitmap_lock(mp);
	xfs_rtbitmap_trans_join(args.tp);
	xfs_rtgroup_lock(args.rtg, XFS_RTGLOCK_BITMAP);
	xfs_rtgroup_trans_join(args.tp, args.rtg, XFS_RTGLOCK_BITMAP);

	/*
	 * Update the bitmap inode's size ondisk and incore. We need to update
@ -853,8 +908,9 @@ out_free:
 */
static xfs_fileoff_t
xfs_last_rt_bmblock(
	struct xfs_mount	*mp)
	struct xfs_rtgroup	*rtg)
{
	struct xfs_mount	*mp = rtg_mount(rtg);
	xfs_fileoff_t		bmbno = mp->m_sb.sb_rbmblocks;

	/* Skip the current block if it is exactly full. */
@ -863,6 +919,132 @@ xfs_last_rt_bmblock(
	return bmbno;
}

/*
 * Allocate space to the bitmap and summary files, as necessary.
 */
static int
xfs_growfs_rt_alloc_blocks(
	struct xfs_rtgroup	*rtg,
	xfs_rfsblock_t		nrblocks,
	xfs_agblock_t		rextsize,
	xfs_extlen_t		*nrbmblocks)
{
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_inode	*rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP];
	struct xfs_inode	*rsumip = rtg->rtg_inodes[XFS_RTGI_SUMMARY];
	xfs_extlen_t		orbmblocks;
	xfs_extlen_t		orsumblocks;
	xfs_extlen_t		nrsumblocks;
	struct xfs_mount	*nmp;
	int			error;

	/*
	 * Get the old block counts for bitmap and summary inodes.
	 * These can't change since other growfs callers are locked out.
	 */
	orbmblocks = XFS_B_TO_FSB(mp, rbmip->i_disk_size);
	orsumblocks = XFS_B_TO_FSB(mp, rsumip->i_disk_size);

	nmp = xfs_growfs_rt_alloc_fake_mount(mp, nrblocks, rextsize);
	if (!nmp)
		return -ENOMEM;

	*nrbmblocks = nmp->m_sb.sb_rbmblocks;
	nrsumblocks = nmp->m_rsumblocks;
	kfree(nmp);

	error = xfs_rtfile_initialize_blocks(rtg, XFS_RTGI_BITMAP, orbmblocks,
			*nrbmblocks, NULL);
	if (error)
		return error;
	return xfs_rtfile_initialize_blocks(rtg, XFS_RTGI_SUMMARY, orsumblocks,
			nrsumblocks, NULL);
}

static int
xfs_growfs_rtg(
	struct xfs_mount	*mp,
	xfs_rfsblock_t		nrblocks,
	xfs_agblock_t		rextsize)
{
	uint8_t			*old_rsum_cache = NULL;
	xfs_extlen_t		bmblocks;
	xfs_fileoff_t		bmbno;
	struct xfs_rtgroup	*rtg;
	unsigned int		i;
	int			error;

	rtg = xfs_rtgroup_grab(mp, 0);
	if (!rtg)
		return -EINVAL;

	for (i = 0; i < XFS_RTGI_MAX; i++) {
		error = xfs_rtginode_ensure(rtg, i);
		if (error)
			goto out_rele;
	}

	error = xfs_growfs_rt_alloc_blocks(rtg, nrblocks, rextsize, &bmblocks);
	if (error)
		goto out_rele;

	if (bmblocks != rtg_mount(rtg)->m_sb.sb_rbmblocks) {
		old_rsum_cache = rtg->rtg_rsum_cache;
		error = xfs_alloc_rsum_cache(rtg, bmblocks);
		if (error)
			goto out_rele;
	}

	for (bmbno = xfs_last_rt_bmblock(rtg); bmbno < bmblocks; bmbno++) {
		error = xfs_growfs_rt_bmblock(rtg, nrblocks, rextsize, bmbno);
		if (error)
			goto out_error;
	}

	if (old_rsum_cache)
		kvfree(old_rsum_cache);
	xfs_rtgroup_rele(rtg);
	return 0;

out_error:
	/*
	 * Reset rtg_extents to the old value if adding more blocks failed.
	 */
	rtg->rtg_extents = xfs_rtgroup_extents(rtg_mount(rtg), rtg_rgno(rtg));
	if (old_rsum_cache) {
		kvfree(rtg->rtg_rsum_cache);
		rtg->rtg_rsum_cache = old_rsum_cache;
	}
out_rele:
	xfs_rtgroup_rele(rtg);
	return error;
}

static int
xfs_growfs_check_rtgeom(
	const struct xfs_mount	*mp,
	xfs_rfsblock_t		rblocks,
	xfs_extlen_t		rextsize)
{
	struct xfs_mount	*nmp;
	int			error = 0;

	nmp = xfs_growfs_rt_alloc_fake_mount(mp, rblocks, rextsize);
	if (!nmp)
		return -ENOMEM;

	/*
	 * New summary size can't be more than half the size of the log. This
	 * prevents us from getting a log overflow, since we'll log basically
	 * the whole summary file at once.
	 */
	if (nmp->m_rsumblocks > (mp->m_sb.sb_logblocks >> 1))
		error = -EINVAL;

	kfree(nmp);
	return error;
}

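The check above enforces the rule that growfsrt may log essentially the whole rt summary file in a single transaction, so a geometry whose summary would exceed half the log is rejected up front. Reduced to its core, the comparison looks like the hypothetical helper below; it is not the kernel's xfs_growfs_check_rtgeom(), just the same test in isolation.

#include <stdbool.h>
#include <stdint.h>

/* refuse geometries whose summary file needs more than half the log */
static bool rt_summary_fits_log(uint64_t rsumblocks, uint64_t logblocks)
{
	return rsumblocks <= (logblocks >> 1);
}

For example, with a 2560-block log, any proposed geometry needing more than 1280 summary blocks would be refused before any on-disk state changes.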
/*
 * Grow the realtime area of the filesystem.
 */
@ -871,16 +1053,9 @@ xfs_growfs_rt(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_growfs_rt_t	*in)		/* growfs rt input struct */
{
	xfs_fileoff_t	bmbno;		/* bitmap block number */
	struct xfs_buf	*bp;		/* temporary buffer */
	int		error;		/* error return value */
	xfs_extlen_t	nrbmblocks;	/* new number of rt bitmap blocks */
	xfs_rtxnum_t	nrextents;	/* new number of realtime extents */
	xfs_extlen_t	nrsumblocks;	/* new number of summary blocks */
	xfs_extlen_t	rbmblocks;	/* current number of rt bitmap blocks */
	xfs_extlen_t	rsumblocks;	/* current number of rt summary blks */
	uint8_t		*rsum_cache;	/* old summary cache */
	xfs_agblock_t	old_rextsize = mp->m_sb.sb_rextsize;
	struct xfs_buf	*bp;
	xfs_agblock_t	old_rextsize = mp->m_sb.sb_rextsize;
	int		error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
@ -891,15 +1066,9 @@ xfs_growfs_rt(

	if (!mutex_trylock(&mp->m_growlock))
		return -EWOULDBLOCK;
	/*
	 * Mount should fail if the rt bitmap/summary files don't load, but
	 * we'll check anyway.
	 */
	error = -EINVAL;
	if (!mp->m_rbmip || !mp->m_rsumip)
		goto out_unlock;

	/* Shrink not supported. */
	error = -EINVAL;
	if (in->newblocks <= mp->m_sb.sb_rblocks)
		goto out_unlock;
	/* Can only change rt extent size when adding rt volume. */
@ -932,82 +1101,28 @@ xfs_growfs_rt(
	/*
	 * Calculate new parameters. These are the final values to be reached.
	 */
	nrextents = div_u64(in->newblocks, in->extsize);
	if (nrextents == 0) {
		error = -EINVAL;
	error = -EINVAL;
	if (in->newblocks < in->extsize)
		goto out_unlock;
	}
	nrbmblocks = xfs_rtbitmap_blockcount(mp, nrextents);
	nrsumblocks = xfs_rtsummary_blockcount(mp,
			xfs_compute_rextslog(nrextents) + 1, nrbmblocks);

	/*
	 * New summary size can't be more than half the size of
	 * the log. This prevents us from getting a log overflow,
	 * since we'll log basically the whole summary file at once.
	 */
	if (nrsumblocks > (mp->m_sb.sb_logblocks >> 1)) {
		error = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Get the old block counts for bitmap and summary inodes.
	 * These can't change since other growfs callers are locked out.
	 */
	rbmblocks = XFS_B_TO_FSB(mp, mp->m_rbmip->i_disk_size);
	rsumblocks = XFS_B_TO_FSB(mp, mp->m_rsumip->i_disk_size);
	/*
	 * Allocate space to the bitmap and summary files, as necessary.
	 */
	error = xfs_rtfile_initialize_blocks(mp->m_rbmip, rbmblocks,
			nrbmblocks, NULL);
	if (error)
		goto out_unlock;
	error = xfs_rtfile_initialize_blocks(mp->m_rsumip, rsumblocks,
			nrsumblocks, NULL);
	/* Make sure the new fs size won't cause problems with the log. */
	error = xfs_growfs_check_rtgeom(mp, in->newblocks, in->extsize);
	if (error)
		goto out_unlock;

	rsum_cache = mp->m_rsum_cache;
	if (nrbmblocks != mp->m_sb.sb_rbmblocks) {
		error = xfs_alloc_rsum_cache(mp, nrbmblocks);
		if (error)
			goto out_unlock;
	}

	/* Initialize the free space bitmap one bitmap block at a time. */
	for (bmbno = xfs_last_rt_bmblock(mp); bmbno < nrbmblocks; bmbno++) {
		error = xfs_growfs_rt_bmblock(mp, in->newblocks, in->extsize,
				bmbno);
		if (error)
			goto out_free;
	}
	error = xfs_growfs_rtg(mp, in->newblocks, in->extsize);
	if (error)
		goto out_unlock;

	if (old_rextsize != in->extsize) {
		error = xfs_growfs_rt_fixup_extsize(mp);
		if (error)
			goto out_free;
			goto out_unlock;
	}

	/* Update secondary superblocks now the physical grow has completed */
	error = xfs_update_secondary_sbs(mp);

out_free:
	/*
	 * If we had to allocate a new rsum_cache, we either need to free the
	 * old one (if we succeeded) or free the new one and restore the old one
	 * (if there was an error).
	 */
	if (rsum_cache != mp->m_rsum_cache) {
		if (error) {
			kvfree(mp->m_rsum_cache);
			mp->m_rsum_cache = rsum_cache;
		} else {
			kvfree(rsum_cache);
		}
	}

out_unlock:
	mutex_unlock(&mp->m_growlock);
	return error;
@ -1021,22 +1136,19 @@ xfs_rtmount_init(
	struct xfs_mount	*mp)	/* file system mount structure */
{
	struct xfs_buf		*bp;	/* buffer for last block of subvolume */
	struct xfs_sb		*sbp;	/* filesystem superblock copy in mount */
	xfs_daddr_t		d;	/* address of last block of subvolume */
	int			error;

	sbp = &mp->m_sb;
	if (sbp->sb_rblocks == 0)
	if (mp->m_sb.sb_rblocks == 0)
		return 0;
	if (mp->m_rtdev_targp == NULL) {
		xfs_warn(mp,
	"Filesystem has a realtime volume, use rtdev=device option");
		return -ENODEV;
	}
	mp->m_rsumlevels = sbp->sb_rextslog + 1;
	mp->m_rsumblocks = xfs_rtsummary_blockcount(mp, mp->m_rsumlevels,
			mp->m_sb.sb_rbmblocks);
	mp->m_rbmip = mp->m_rsumip = NULL;

	mp->m_rsumblocks = xfs_rtsummary_blockcount(mp, &mp->m_rsumlevels);

	/*
	 * Check that the realtime section is an ok size.
	 */
@ -1060,7 +1172,7 @@ xfs_rtmount_init(

static int
xfs_rtalloc_count_frextent(
	struct xfs_mount		*mp,
	struct xfs_rtgroup		*rtg,
	struct xfs_trans		*tp,
	const struct xfs_rtalloc_rec	*rec,
	void				*priv)
@ -1082,12 +1194,18 @@ xfs_rtalloc_reinit_frextents(
	uint64_t		val = 0;
	int			error;

	xfs_rtbitmap_lock_shared(mp, XFS_RBMLOCK_BITMAP);
	error = xfs_rtalloc_query_all(mp, NULL, xfs_rtalloc_count_frextent,
			&val);
	xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
	if (error)
		return error;
	struct xfs_rtgroup	*rtg = NULL;

	while ((rtg = xfs_rtgroup_next(mp, rtg))) {
		xfs_rtgroup_lock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
		error = xfs_rtalloc_query_all(rtg, NULL,
				xfs_rtalloc_count_frextent, &val);
		xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
		if (error) {
			xfs_rtgroup_rele(rtg);
			return error;
		}
	}

	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_frextents = val;
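Recounting free rt extents now walks every allocation group with xfs_rtgroup_next(), which returns each group with a reference held and drops the previous one, so an early error exit has to release the current group itself. The toy model below (invented types, not the kernel iterator) shows the shape of that loop.

#include <stddef.h>
#include <stdint.h>

struct rtgroup {
	uint64_t	free_extents;
	int		refcount;
};

/* hand back the next group pinned, dropping the reference on the previous one */
static struct rtgroup *group_next(struct rtgroup *groups, size_t ngroups,
				  struct rtgroup *prev)
{
	size_t next = prev ? (size_t)(prev - groups) + 1 : 0;

	if (prev)
		prev->refcount--;
	if (next >= ngroups)
		return NULL;
	groups[next].refcount++;
	return &groups[next];
}

static uint64_t count_free(struct rtgroup *groups, size_t ngroups)
{
	struct rtgroup *rtg = NULL;
	uint64_t total = 0;

	/* the real code also takes the per-group bitmap lock around the query */
	while ((rtg = group_next(groups, ngroups, rtg)))
		total += rtg->free_extents;
	return total;
}

int main(void)
{
	struct rtgroup g[2] = { { .free_extents = 10 }, { .free_extents = 32 } };

	return count_free(g, 2) == 42 ? 0 : 1;
}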
@ -1104,12 +1222,11 @@ xfs_rtalloc_reinit_frextents(
static inline int
xfs_rtmount_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	unsigned int		lock_class)
	struct xfs_inode	*ip)
{
	int			error;

	xfs_ilock(ip, XFS_ILOCK_EXCL | lock_class);
	xfs_ilock(ip, XFS_ILOCK_EXCL);

	error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
	if (error)
@ -1122,10 +1239,36 @@ xfs_rtmount_iread_extents(
	}

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL | lock_class);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

static int
xfs_rtmount_rtg(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_rtgroup	*rtg)
{
	int			error, i;

	rtg->rtg_extents = xfs_rtgroup_extents(mp, rtg_rgno(rtg));

	for (i = 0; i < XFS_RTGI_MAX; i++) {
		error = xfs_rtginode_load(rtg, i, tp);
		if (error)
			return error;

		if (rtg->rtg_inodes[i]) {
			error = xfs_rtmount_iread_extents(tp,
					rtg->rtg_inodes[i]);
			if (error)
				return error;
		}
	}

	return xfs_alloc_rsum_cache(rtg, mp->m_sb.sb_rbmblocks);
}

/*
 * Get the bitmap and summary inodes and the summary cache into the mount
 * structure at mount time.
@ -1135,48 +1278,29 @@ xfs_rtmount_inodes(
	struct xfs_mount	*mp)
{
	struct xfs_trans	*tp;
	struct xfs_sb		*sbp = &mp->m_sb;
	struct xfs_rtgroup	*rtg = NULL;
	int			error;

	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	error = xfs_trans_metafile_iget(tp, mp->m_sb.sb_rbmino,
			XFS_METAFILE_RTBITMAP, &mp->m_rbmip);
	if (xfs_metadata_is_sick(error))
		xfs_rt_mark_sick(mp, XFS_SICK_RT_BITMAP);
	if (error)
		goto out_trans;
	ASSERT(mp->m_rbmip != NULL);
	if (xfs_has_rtgroups(mp) && mp->m_sb.sb_rgcount > 0) {
		error = xfs_rtginode_load_parent(tp);
		if (error)
			goto out_cancel;
	}

	error = xfs_rtmount_iread_extents(tp, mp->m_rbmip, XFS_ILOCK_RTBITMAP);
	if (error)
		goto out_rele_bitmap;
	while ((rtg = xfs_rtgroup_next(mp, rtg))) {
		error = xfs_rtmount_rtg(mp, tp, rtg);
		if (error) {
			xfs_rtgroup_rele(rtg);
			xfs_rtunmount_inodes(mp);
			break;
		}
	}

	error = xfs_trans_metafile_iget(tp, mp->m_sb.sb_rsumino,
			XFS_METAFILE_RTSUMMARY, &mp->m_rsumip);
	if (xfs_metadata_is_sick(error))
		xfs_rt_mark_sick(mp, XFS_SICK_RT_SUMMARY);
	if (error)
		goto out_rele_bitmap;
	ASSERT(mp->m_rsumip != NULL);

	error = xfs_rtmount_iread_extents(tp, mp->m_rsumip, XFS_ILOCK_RTSUM);
	if (error)
		goto out_rele_summary;

	error = xfs_alloc_rsum_cache(mp, sbp->sb_rbmblocks);
	if (error)
		goto out_rele_summary;
	xfs_trans_cancel(tp);
	return 0;

out_rele_summary:
	xfs_irele(mp->m_rsumip);
out_rele_bitmap:
	xfs_irele(mp->m_rbmip);
out_trans:
out_cancel:
	xfs_trans_cancel(tp);
	return error;
}
@ -1185,11 +1309,11 @@ void
xfs_rtunmount_inodes(
	struct xfs_mount	*mp)
{
	kvfree(mp->m_rsum_cache);
	if (mp->m_rbmip)
		xfs_irele(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_irele(mp->m_rsumip);
	struct xfs_rtgroup	*rtg = NULL;

	while ((rtg = xfs_rtgroup_next(mp, rtg)))
		xfs_rtunmount_rtg(rtg);
	xfs_rtginode_irele(&mp->m_rtdirip);
}

/*
@ -1201,28 +1325,29 @@ xfs_rtunmount_inodes(
 */
static xfs_rtxnum_t
xfs_rtpick_extent(
	xfs_mount_t		*mp,	/* file system mount point */
	xfs_trans_t		*tp,	/* transaction pointer */
	struct xfs_rtgroup	*rtg,
	struct xfs_trans	*tp,
	xfs_rtxlen_t		len)	/* allocation length (rtextents) */
{
	xfs_rtxnum_t		b;	/* result rtext */
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_inode	*rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP];
	xfs_rtxnum_t		b = 0;	/* result rtext */
	int			log2;	/* log of sequence number */
	uint64_t		resid;	/* residual after log removed */
	uint64_t		seq;	/* sequence number of file creation */
	struct timespec64	ts;	/* timespec in inode */

	xfs_assert_ilocked(mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_assert_ilocked(rbmip, XFS_ILOCK_EXCL);

	ts = inode_get_atime(VFS_I(mp->m_rbmip));
	if (!(mp->m_rbmip->i_diflags & XFS_DIFLAG_NEWRTBM)) {
		mp->m_rbmip->i_diflags |= XFS_DIFLAG_NEWRTBM;
	ts = inode_get_atime(VFS_I(rbmip));
	if (!(rbmip->i_diflags & XFS_DIFLAG_NEWRTBM)) {
		rbmip->i_diflags |= XFS_DIFLAG_NEWRTBM;
		seq = 0;
	} else {
		seq = ts.tv_sec;
	}
	if ((log2 = xfs_highbit64(seq)) == -1)
		b = 0;
	else {
	log2 = xfs_highbit64(seq);
	if (log2 != -1) {
		resid = seq - (1ULL << log2);
		b = (mp->m_sb.sb_rextents * ((resid << 1) + 1ULL)) >>
		    (log2 + 1);
@ -1232,8 +1357,8 @@ xfs_rtpick_extent(
			b = mp->m_sb.sb_rextents - len;
	}
	ts.tv_sec = seq + 1;
	inode_set_atime_to_ts(VFS_I(mp->m_rbmip), ts);
	xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
	inode_set_atime_to_ts(VFS_I(rbmip), ts);
	xfs_trans_log_inode(tp, rbmip, XFS_ILOG_CORE);
	return b;
}

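xfs_rtpick_extent() spreads initial user-data allocations across the volume: the allocation sequence number is split into its top bit and a residual, and the residual selects the midpoint of one of 2^log2 equal slices of the extent space, clamped so the pick stays inside the volume. A userspace sketch of just that arithmetic, with illustrative names:

#include <stdint.h>
#include <stdio.h>

static uint64_t pick_start(uint64_t rextents, uint64_t seq, uint64_t len)
{
	uint64_t b = 0;
	int log2 = -1;
	uint64_t v;

	for (v = seq; v; v >>= 1)		/* highest set bit of seq */
		log2++;

	if (log2 != -1) {
		uint64_t resid = seq - (1ULL << log2);

		/* midpoint of slice 'resid' out of 2^log2 equal slices */
		b = (rextents * ((resid << 1) + 1ULL)) >> (log2 + 1);
	}
	if (b + len > rextents)			/* keep the pick inside the volume */
		b = rextents - len;
	return b;
}

int main(void)
{
	/* 1024 extents, 4th allocation (seq = 3), 16-extent request */
	printf("%llu\n", (unsigned long long)pick_start(1024, 3, 16)); /* 768 */
	return 0;
}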
@ -1288,12 +1413,16 @@ xfs_rtallocate(
	xfs_rtxlen_t		len = 0;
	int			error = 0;

	args.rtg = xfs_rtgroup_grab(args.mp, 0);
	if (!args.rtg)
		return -ENOSPC;

	/*
	 * Lock out modifications to both the RT bitmap and summary inodes.
	 */
	if (!*rtlocked) {
		xfs_rtbitmap_lock(args.mp);
		xfs_rtbitmap_trans_join(tp);
		xfs_rtgroup_lock(args.rtg, XFS_RTGLOCK_BITMAP);
		xfs_rtgroup_trans_join(tp, args.rtg, XFS_RTGLOCK_BITMAP);
		*rtlocked = true;
	}

@ -1304,7 +1433,7 @@ xfs_rtallocate(
	if (bno_hint)
		start = xfs_rtb_to_rtx(args.mp, bno_hint);
	else if (initial_user_data)
		start = xfs_rtpick_extent(args.mp, tp, maxlen);
		start = xfs_rtpick_extent(args.rtg, tp, maxlen);

	if (start) {
		error = xfs_rtallocate_extent_near(&args, start, minlen, maxlen,
@ -1334,10 +1463,11 @@ xfs_rtallocate(
	xfs_trans_mod_sb(tp, wasdel ?
			XFS_TRANS_SB_RES_FREXTENTS : XFS_TRANS_SB_FREXTENTS,
			-(long)len);
	*bno = xfs_rtx_to_rtb(args.mp, rtx);
	*bno = xfs_rtx_to_rtb(args.rtg, rtx);
	*blen = xfs_rtxlen_to_extlen(args.mp, len);

out_release:
	xfs_rtgroup_rele(args.rtg);
	xfs_rtbuf_cache_relse(&args);
	return error;
}

@ -885,7 +885,8 @@ xfs_fs_statfs(

		statp->f_blocks = sbp->sb_rblocks;
		freertx = percpu_counter_sum_positive(&mp->m_frextents);
		statp->f_bavail = statp->f_bfree = xfs_rtx_to_rtb(mp, freertx);
		statp->f_bavail = statp->f_bfree =
			xfs_rtbxlen_to_blen(mp, freertx);
	}

	return 0;

@ -48,6 +48,7 @@
#include "xfs_refcount.h"
#include "xfs_metafile.h"
#include "xfs_metadir.h"
#include "xfs_rtgroup.h"

/*
 * We include this last to have the helpers above available for the trace

@ -72,6 +72,7 @@ struct xfs_btree_cur;
struct xfs_defer_op_type;
struct xfs_refcount_irec;
struct xfs_fsmap;
struct xfs_fsmap_irec;
struct xfs_group;
struct xfs_rmap_irec;
struct xfs_icreate_log;
@ -218,6 +219,7 @@ DEFINE_PERAG_REF_EVENT(xfs_perag_clear_inode_tag);
DEFINE_PERAG_REF_EVENT(xfs_reclaim_inodes_count);

TRACE_DEFINE_ENUM(XG_TYPE_AG);
TRACE_DEFINE_ENUM(XG_TYPE_RTG);

DECLARE_EVENT_CLASS(xfs_group_class,
	TP_PROTO(struct xfs_group *xg, unsigned long caller_ip),
@ -3881,16 +3883,17 @@ DEFINE_INODE_IREC_EVENT(xfs_swap_extent_rmap_remap_piece);
DEFINE_INODE_ERROR_EVENT(xfs_swap_extent_rmap_error);

/* fsmap traces */
DECLARE_EVENT_CLASS(xfs_fsmap_class,
TRACE_EVENT(xfs_fsmap_mapping,
	TP_PROTO(struct xfs_mount *mp, u32 keydev, xfs_agnumber_t agno,
		 const struct xfs_rmap_irec *rmap),
	TP_ARGS(mp, keydev, agno, rmap),
		 const struct xfs_fsmap_irec *frec),
	TP_ARGS(mp, keydev, agno, frec),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(dev_t, keydev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_fsblock_t, bno)
		__field(xfs_filblks_t, len)
		__field(xfs_agblock_t, agbno)
		__field(xfs_daddr_t, start_daddr)
		__field(xfs_daddr_t, len_daddr)
		__field(uint64_t, owner)
		__field(uint64_t, offset)
		__field(unsigned int, flags)
@ -3899,33 +3902,66 @@ DECLARE_EVENT_CLASS(xfs_fsmap_class,
		__entry->dev = mp->m_super->s_dev;
		__entry->keydev = new_decode_dev(keydev);
		__entry->agno = agno;
		__entry->bno = rmap->rm_startblock;
		__entry->len = rmap->rm_blockcount;
		__entry->agbno = frec->rec_key;
		__entry->start_daddr = frec->start_daddr;
		__entry->len_daddr = frec->len_daddr;
		__entry->owner = frec->owner;
		__entry->offset = frec->offset;
		__entry->flags = frec->rm_flags;
	),
	TP_printk("dev %d:%d keydev %d:%d agno 0x%x rmapbno 0x%x start_daddr 0x%llx len_daddr 0x%llx owner 0x%llx fileoff 0x%llx flags 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  MAJOR(__entry->keydev), MINOR(__entry->keydev),
		  __entry->agno,
		  __entry->agbno,
		  __entry->start_daddr,
		  __entry->len_daddr,
		  __entry->owner,
		  __entry->offset,
		  __entry->flags)
);

DECLARE_EVENT_CLASS(xfs_fsmap_group_key_class,
	TP_PROTO(struct xfs_mount *mp, u32 keydev, xfs_agnumber_t agno,
		 const struct xfs_rmap_irec *rmap),
	TP_ARGS(mp, keydev, agno, rmap),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(dev_t, keydev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(uint64_t, owner)
		__field(uint64_t, offset)
		__field(unsigned int, flags)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->keydev = new_decode_dev(keydev);
		__entry->agno = agno;
		__entry->agbno = rmap->rm_startblock;
		__entry->owner = rmap->rm_owner;
		__entry->offset = rmap->rm_offset;
		__entry->flags = rmap->rm_flags;
	),
	TP_printk("dev %d:%d keydev %d:%d agno 0x%x startblock 0x%llx fsbcount 0x%llx owner 0x%llx fileoff 0x%llx flags 0x%x",
	TP_printk("dev %d:%d keydev %d:%d agno 0x%x startblock 0x%x owner 0x%llx fileoff 0x%llx flags 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  MAJOR(__entry->keydev), MINOR(__entry->keydev),
		  __entry->agno,
		  __entry->bno,
		  __entry->len,
		  __entry->agbno,
		  __entry->owner,
		  __entry->offset,
		  __entry->flags)
)
#define DEFINE_FSMAP_EVENT(name) \
DEFINE_EVENT(xfs_fsmap_class, name, \
#define DEFINE_FSMAP_GROUP_KEY_EVENT(name) \
DEFINE_EVENT(xfs_fsmap_group_key_class, name, \
	TP_PROTO(struct xfs_mount *mp, u32 keydev, xfs_agnumber_t agno, \
		 const struct xfs_rmap_irec *rmap), \
	TP_ARGS(mp, keydev, agno, rmap))
DEFINE_FSMAP_EVENT(xfs_fsmap_low_key);
DEFINE_FSMAP_EVENT(xfs_fsmap_high_key);
DEFINE_FSMAP_EVENT(xfs_fsmap_mapping);
DEFINE_FSMAP_GROUP_KEY_EVENT(xfs_fsmap_low_group_key);
DEFINE_FSMAP_GROUP_KEY_EVENT(xfs_fsmap_high_group_key);

DECLARE_EVENT_CLASS(xfs_fsmap_linear_class,
	TP_PROTO(struct xfs_mount *mp, u32 keydev, uint64_t bno),
DECLARE_EVENT_CLASS(xfs_fsmap_linear_key_class,
	TP_PROTO(struct xfs_mount *mp, u32 keydev, xfs_fsblock_t bno),
	TP_ARGS(mp, keydev, bno),
	TP_STRUCT__entry(
		__field(dev_t, dev)
@ -3942,12 +3978,12 @@ DECLARE_EVENT_CLASS(xfs_fsmap_linear_class,
		  MAJOR(__entry->keydev), MINOR(__entry->keydev),
		  __entry->bno)
)
#define DEFINE_FSMAP_LINEAR_EVENT(name) \
DEFINE_EVENT(xfs_fsmap_linear_class, name, \
#define DEFINE_FSMAP_LINEAR_KEY_EVENT(name) \
DEFINE_EVENT(xfs_fsmap_linear_key_class, name, \
	TP_PROTO(struct xfs_mount *mp, u32 keydev, uint64_t bno), \
	TP_ARGS(mp, keydev, bno))
DEFINE_FSMAP_LINEAR_EVENT(xfs_fsmap_low_key_linear);
DEFINE_FSMAP_LINEAR_EVENT(xfs_fsmap_high_key_linear);
DEFINE_FSMAP_LINEAR_KEY_EVENT(xfs_fsmap_low_linear_key);
DEFINE_FSMAP_LINEAR_KEY_EVENT(xfs_fsmap_high_linear_key);

DECLARE_EVENT_CLASS(xfs_getfsmap_class,
	TP_PROTO(struct xfs_mount *mp, struct xfs_fsmap *fsmap),