mirror of
https://github.com/torvalds/linux.git
synced 2024-12-09 04:31:39 +00:00
ffc18582ed
This ambitious series aims to cleans up redundant inode walk code in xfs_icache.c, hide implementation details of the quotaoff dquot release code, and eliminates indirect function calls from incore inode walks. The first thing it does is to move all the code that quotaoff calls to release dquots from all incore inodes into xfs_icache.c. Next, it separates the goal of an inode walk from the actual radix tree tags that may or may not be involved and drops the kludgy XFS_ICI_NO_TAG thing. Finally, we split the speculative preallocation (blockgc) and quotaoff dquot release code paths into separate functions so that we can keep the implementations cohesive. Christoph suggested last cycle that we 'simply' change quotaoff not to allow deactivating quota entirely, but as these cleanups are to enable one major change in behavior (deferred inode inactivation) I do not want to add a second behavior change (quotaoff) as a dependency. To be blunt: Additional cleanups are not in scope for this series. Next, I made two observations about incore inode radix tree walks -- since there's a 1:1 mapping between the walk goal and the per-inode processing function passed in, we can use the goal to make a direct call to the processing function. Furthermore, the only caller to supply a nonzero iter_flags argument is quotaoff, and there's only one INEW flag. From that observation, I concluded that it's quite possible to remove two parameters from the xfs_inode_walk* function signatures -- the iter_flags, and the execute function pointer. The middle of the series moves the INEW functionality into the one piece (quotaoff) that wants it, and removes the indirect calls. The final observation is that the inode reclaim walk loop is now almost the same as xfs_inode_walk, so it's silly to maintain two copies. Merge the reclaim loop code into xfs_inode_walk. Lastly, refactor the per-ag radix tagging functions since there's duplicated code that can be consolidated. 
This series is a prerequisite for the next two patchsets, since deferred inode inactivation will add another inode radix tree tag and iterator function to xfs_inode_walk. v2: walk the vfs inode list when running quotaoff instead of the radix tree, then rework the (now completely internal) inode walk function to take the tag as the main parameter. v3: merge the reclaim loop into xfs_inode_walk, then consolidate the radix tree tagging functions v4: rebase to 5.13-rc4 v5: combine with the quotaoff patchset, reorder functions to minimize forward declarations, split inode walk goals from radix tree tags to reduce conceptual confusion v6: start moving the inode cache code towards the xfs_icwalk prefix -----BEGIN PGP SIGNATURE----- iQIzBAABCgAdFiEEUzaAxoMeQq6m2jMV+H93GTRKtOsFAmC5Yv0ACgkQ+H93GTRK tOv7Fg//Z7cKph0zSg6qsukMEMZxscuNcEBydCW1bu9gSx1NpszDpiGqAiO5ZB3X wP2XkCqjuatbNGGvkNLHS/M4sbLX3ELogvYmMRvUhDoaSFxT/KKgxvsyNffiCSS7 xRB/rvWRp9MGRpBWPF0ZUxFU6VBzhCrYdMsNhvW95AEup8S/j+NplwoIif0gzaZZ Q6Fl4Ca9VEBvJQPV+/zkLih19iFItmARJhPHUs4BO1nZv+CzZBFQHg7Ijw7nW92j eSY68W4LH/IQ5cqm+HrD/+Z6ns0P7J2viewzVymkNEGnuX4a0xrQrzQ8ydRsAxTi 9EDrpIe3MbSI5YjJfmRe8G3LX5p7vBpqc8TeyZdRDMGWkFjT33HPlQNb6WxKLQbA mjKdfr8AYZR/UQKW/7oZFrJnOoMpYRAQ4Sn/9BAYZQYm7tiLzuZsrEZ7JBwiUA56 XHmlsDDeLzJeKvjmUu8M3H4oh4Nwf5/I2vJwHjueTfhl83uJP04igIXC4rnq56bM AAAjH9uV11Fo3q0ywAnRtN2HYj8PEJlCMK5CNskILrGeMITsBPGht0SbaA6hDI2h GYmltKInHzuPhHC9NfyPVrVr3BrmPR5cBsVFESiz5A4E9rbuKmmna6Yk8MFlMyl8 FRIA3zVatJ2qQXtsAcdI8AZzMd7ciYhkAgCqFKxv8qK/qxITHh4= =Rxdn -----END PGP SIGNATURE----- Merge tag 'inode-walk-cleanups-5.14_2021-06-03' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux into xfs-5.14-merge2 xfs: clean up incore inode walk functions This ambitious series aims to cleans up redundant inode walk code in xfs_icache.c, hide implementation details of the quotaoff dquot release code, and eliminates indirect function calls from incore inode walks. 
The first thing it does is to move all the code that quotaoff calls to release dquots from all incore inodes into xfs_icache.c. Next, it separates the goal of an inode walk from the actual radix tree tags that may or may not be involved and drops the kludgy XFS_ICI_NO_TAG thing. Finally, we split the speculative preallocation (blockgc) and quotaoff dquot release code paths into separate functions so that we can keep the implementations cohesive. Christoph suggested last cycle that we 'simply' change quotaoff not to allow deactivating quota entirely, but as these cleanups are to enable one major change in behavior (deferred inode inactivation) I do not want to add a second behavior change (quotaoff) as a dependency. To be blunt: Additional cleanups are not in scope for this series. Next, I made two observations about incore inode radix tree walks -- since there's a 1:1 mapping between the walk goal and the per-inode processing function passed in, we can use the goal to make a direct call to the processing function. Furthermore, the only caller to supply a nonzero iter_flags argument is quotaoff, and there's only one INEW flag. From that observation, I concluded that it's quite possible to remove two parameters from the xfs_inode_walk* function signatures -- the iter_flags, and the execute function pointer. The middle of the series moves the INEW functionality into the one piece (quotaoff) that wants it, and removes the indirect calls. The final observation is that the inode reclaim walk loop is now almost the same as xfs_inode_walk, so it's silly to maintain two copies. Merge the reclaim loop code into xfs_inode_walk. Lastly, refactor the per-ag radix tagging functions since there's duplicated code that can be consolidated. This series is a prerequisite for the next two patchsets, since deferred inode inactivation will add another inode radix tree tag and iterator function to xfs_inode_walk. 
v2: walk the vfs inode list when running quotaoff instead of the radix tree, then rework the (now completely internal) inode walk function to take the tag as the main parameter. v3: merge the reclaim loop into xfs_inode_walk, then consolidate the radix tree tagging functions v4: rebase to 5.13-rc4 v5: combine with the quotaoff patchset, reorder functions to minimize forward declarations, split inode walk goals from radix tree tags to reduce conceptual confusion v6: start moving the inode cache code towards the xfs_icwalk prefix * tag 'inode-walk-cleanups-5.14_2021-06-03' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux: xfs: refactor per-AG inode tagging functions xfs: merge xfs_reclaim_inodes_ag into xfs_inode_walk_ag xfs: pass struct xfs_eofblocks to the inode scan callback xfs: fix radix tree tag signs xfs: make the icwalk processing functions clean up the grab state xfs: clean up inode state flag tests in xfs_blockgc_igrab xfs: remove indirect calls from xfs_inode_walk{,_ag} xfs: remove iter_flags parameter from xfs_inode_walk_* xfs: move xfs_inew_wait call into xfs_dqrele_inode xfs: separate the dqrele_all inode grab logic from xfs_inode_walk_ag_grab xfs: pass the goal of the incore inode walk to xfs_inode_walk() xfs: rename xfs_inode_walk functions to xfs_icwalk xfs: move the inode walk functions further down xfs: detach inode dquots at the end of inactivation xfs: move the quotaoff dqrele inode walk into xfs_icache.c [djwong: added variable names to function declarations while fixing merge conflicts] Signed-off-by: Darrick J. Wong <djwong@kernel.org>
171 lines
5.8 KiB
C
171 lines
5.8 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2018 Red Hat, Inc.
 * All rights reserved.
 */

#ifndef __LIBXFS_AG_H
#define __LIBXFS_AG_H 1

/* Forward declarations: only pointers to these are used in this header. */
struct xfs_mount;
struct xfs_trans;
struct xfs_perag;
/*
|
|
* Per-ag infrastructure
|
|
*/
|
|
|
|
/* per-AG block reservation data structures*/
|
|
struct xfs_ag_resv {
|
|
/* number of blocks originally reserved here */
|
|
xfs_extlen_t ar_orig_reserved;
|
|
/* number of blocks reserved here */
|
|
xfs_extlen_t ar_reserved;
|
|
/* number of blocks originally asked for */
|
|
xfs_extlen_t ar_asked;
|
|
};
|
|
|
|
/*
|
|
* Per-ag incore structure, copies of information in agf and agi, to improve the
|
|
* performance of allocation group selection.
|
|
*/
|
|
struct xfs_perag {
|
|
struct xfs_mount *pag_mount; /* owner filesystem */
|
|
xfs_agnumber_t pag_agno; /* AG this structure belongs to */
|
|
atomic_t pag_ref; /* perag reference count */
|
|
char pagf_init; /* this agf's entry is initialized */
|
|
char pagi_init; /* this agi's entry is initialized */
|
|
char pagf_metadata; /* the agf is preferred to be metadata */
|
|
char pagi_inodeok; /* The agi is ok for inodes */
|
|
uint8_t pagf_levels[XFS_BTNUM_AGF];
|
|
/* # of levels in bno & cnt btree */
|
|
bool pagf_agflreset; /* agfl requires reset before use */
|
|
uint32_t pagf_flcount; /* count of blocks in freelist */
|
|
xfs_extlen_t pagf_freeblks; /* total free blocks */
|
|
xfs_extlen_t pagf_longest; /* longest free space */
|
|
uint32_t pagf_btreeblks; /* # of blocks held in AGF btrees */
|
|
xfs_agino_t pagi_freecount; /* number of free inodes */
|
|
xfs_agino_t pagi_count; /* number of allocated inodes */
|
|
|
|
/*
|
|
* Inode allocation search lookup optimisation.
|
|
* If the pagino matches, the search for new inodes
|
|
* doesn't need to search the near ones again straight away
|
|
*/
|
|
xfs_agino_t pagl_pagino;
|
|
xfs_agino_t pagl_leftrec;
|
|
xfs_agino_t pagl_rightrec;
|
|
|
|
int pagb_count; /* pagb slots in use */
|
|
uint8_t pagf_refcount_level; /* recount btree height */
|
|
|
|
/* Blocks reserved for all kinds of metadata. */
|
|
struct xfs_ag_resv pag_meta_resv;
|
|
/* Blocks reserved for the reverse mapping btree. */
|
|
struct xfs_ag_resv pag_rmapbt_resv;
|
|
|
|
/* -- kernel only structures below this line -- */
|
|
|
|
/*
|
|
* Bitsets of per-ag metadata that have been checked and/or are sick.
|
|
* Callers should hold pag_state_lock before accessing this field.
|
|
*/
|
|
uint16_t pag_checked;
|
|
uint16_t pag_sick;
|
|
spinlock_t pag_state_lock;
|
|
|
|
spinlock_t pagb_lock; /* lock for pagb_tree */
|
|
struct rb_root pagb_tree; /* ordered tree of busy extents */
|
|
unsigned int pagb_gen; /* generation count for pagb_tree */
|
|
wait_queue_head_t pagb_wait; /* woken when pagb_gen changes */
|
|
|
|
atomic_t pagf_fstrms; /* # of filestreams active in this AG */
|
|
|
|
spinlock_t pag_ici_lock; /* incore inode cache lock */
|
|
struct radix_tree_root pag_ici_root; /* incore inode cache root */
|
|
int pag_ici_reclaimable; /* reclaimable inodes */
|
|
unsigned long pag_ici_reclaim_cursor; /* reclaim restart point */
|
|
|
|
/* buffer cache index */
|
|
spinlock_t pag_buf_lock; /* lock for pag_buf_hash */
|
|
struct rhashtable pag_buf_hash;
|
|
|
|
/* for rcu-safe freeing */
|
|
struct rcu_head rcu_head;
|
|
|
|
/* background prealloc block trimming */
|
|
struct delayed_work pag_blockgc_work;
|
|
|
|
/*
|
|
* Unlinked inode information. This incore information reflects
|
|
* data stored in the AGI, so callers must hold the AGI buffer lock
|
|
* or have some other means to control concurrency.
|
|
*/
|
|
struct rhashtable pagi_unlinked_hash;
|
|
};
|
|
|
|
/* perag setup and teardown */
int xfs_initialize_perag(struct xfs_mount *mp, xfs_agnumber_t agcount,
			xfs_agnumber_t *maxagi);
int xfs_initialize_perag_data(struct xfs_mount *mp, xfs_agnumber_t agno);
void xfs_free_perag(struct xfs_mount *mp);

/* perag reference counting; every get must be balanced by a put */
struct xfs_perag *xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno);
struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *mp, xfs_agnumber_t agno,
		unsigned int tag);
void xfs_perag_put(struct xfs_perag *pag);
/*
 * Perag iteration APIs
 *
 * XXX: for_each_perag_range() usage really needs an iterator to clean up when
 * we terminate at end_agno because we may have taken a reference to the perag
 * beyond end_agno. Right now callers have to be careful to catch and clean that
 * up themselves. This is not necessary for the callers of for_each_perag() and
 * for_each_perag_from() because they terminate at sb_agcount where there are
 * no perag structures in tree beyond end_agno.
 */
#define for_each_perag_range(mp, next_agno, end_agno, pag) \
	for ((pag) = xfs_perag_get((mp), (next_agno)); \
		(pag) != NULL && (next_agno) <= (end_agno); \
		(next_agno) = (pag)->pag_agno + 1, \
		xfs_perag_put(pag), \
		(pag) = xfs_perag_get((mp), (next_agno)))

#define for_each_perag_from(mp, next_agno, pag) \
	for_each_perag_range((mp), (next_agno), (mp)->m_sb.sb_agcount, (pag))
/*
 * Walk every AG from 0 to sb_agcount - 1.
 *
 * Previously this expanded to two statements -- "(agno) = 0;" followed by
 * the for loop -- so an unbraced "if (x) for_each_perag(...)" would run the
 * loop unconditionally.  Fold the initialisation into the for() header so
 * the macro is a single statement; the iteration semantics are unchanged
 * from for_each_perag_from() starting at AG 0.
 */
#define for_each_perag(mp, agno, pag) \
	for ((agno) = 0, (pag) = xfs_perag_get((mp), 0); \
		(pag) != NULL && (agno) <= (mp)->m_sb.sb_agcount; \
		(agno) = (pag)->pag_agno + 1, \
		xfs_perag_put(pag), \
		(pag) = xfs_perag_get((mp), (agno)))
/*
 * Walk only the AGs whose radix tree entries carry the given tag; the
 * lookup helper skips untagged AGs, so the loop ends when no tagged perag
 * remains at or beyond agno.
 */
#define for_each_perag_tag(mp, agno, pag, tag) \
	for ((agno) = 0, (pag) = xfs_perag_get_tag((mp), 0, (tag)); \
		(pag) != NULL; \
		(agno) = (pag)->pag_agno + 1, \
		xfs_perag_put(pag), \
		(pag) = xfs_perag_get_tag((mp), (agno), (tag)))
struct aghdr_init_data {
|
|
/* per ag data */
|
|
xfs_agblock_t agno; /* ag to init */
|
|
xfs_extlen_t agsize; /* new AG size */
|
|
struct list_head buffer_list; /* buffer writeback list */
|
|
xfs_rfsblock_t nfree; /* cumulative new free space */
|
|
|
|
/* per header data */
|
|
xfs_daddr_t daddr; /* header location */
|
|
size_t numblks; /* size of header */
|
|
xfs_btnum_t type; /* type of btree root block */
|
|
};
|
|
|
|
int xfs_ag_init_headers(struct xfs_mount *mp, struct aghdr_init_data *id);
|
|
int xfs_ag_shrink_space(struct xfs_mount *mp, struct xfs_trans **tpp,
|
|
xfs_agnumber_t agno, xfs_extlen_t delta);
|
|
int xfs_ag_extend_space(struct xfs_mount *mp, struct xfs_trans *tp,
|
|
struct aghdr_init_data *id, xfs_extlen_t len);
|
|
int xfs_ag_get_geometry(struct xfs_mount *mp, xfs_agnumber_t agno,
|
|
struct xfs_ag_geometry *ageo);
|
|
|
|
#endif /* __LIBXFS_AG_H */
|