Changes since last update:

- Cleanup unnecessary function call parameters
- Fix a use-after-free bug when aborting logging intents
- Refactor filestreams state data to avoid use-after-free bug
- Fix incorrect removal of cow extents when truncating extended attributes.
- Refactor open-coded __set_page_dirty in favor of using vfs function.
- Fix a deadlock when fstrim and fs shutdown race.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2

iQIcBAABCgAGBQJazZ/HAAoJEPh/dxk0SrTrGeYP/Asis7MZ3TWfzsPHwK6EoH+w
q1VnKVMqjSRnE8DYmx8w3tMqny2qg9klLkT1SRRz9Htr5CER5XqIX6ZYlQpVKx2R
ycS+I1l/V/kqEmAgDgSK3DZ5uMgKHfo0w7GbKFDg69YDztN3yNBpDfUZ4VqA/Ua2
ADaeMYkJh6oBB5yZAWGyAJEM5TieqQppqyc+WLhbORyjEFreUTTmLqbzLPnceSih
rQLg0noDwZK0XqbwndYXGTNKKoQtiJalnZP18DhH4zOr+FH03i/gmlU3w+ANl3eX
IEE0TR2EkNHLZeduz7xT7ZHUOo0TaGObBK8CJFSojibQ/HooVjLcFQResd3q0coN
WTkbTuxHwMqk2IujKRTqli/saENhvFrOrm/nYTFPw9+3GpRt0iLrXPSBbeMjmsZG
XntdimPEHywjYrdW10VRH+6E6tvQiC/tl3abBuXdaEOJs1KZmPYNt42EF0ZQ5Xs5
IeDOhPLuuyUwRf12RVA9WS6xGdMR0+foMqncXZNcAzxQeAVfUNEtASNMTNIOO2H5
xD34/1ooFJnwT755VLT9U/qUd5CHtWO3AkH+9RVCpKWTaEOSY+gV6ZgmINv9npur
Vw2xnZwxysegVlu76uct/b/sy/8J3OKYIoSYwbt6Zxi0M1WF9zanJ9pKCsBYdL6A
xWoMr0X1yFGcYJozqwJU
=uGz0
-----END PGP SIGNATURE-----

Merge tag 'xfs-4.17-merge-4' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull more xfs updates from Darrick Wong:
 "Most of these are code cleanups, but there are a couple of notable
  use-after-free bug fixes. This series has been run through a full
  xfstests run over the week and through a quick xfstests run against
  this morning's master, with no major failures reported.

  - clean up unnecessary function call parameters
  - fix a use-after-free bug when aborting logging intents
  - refactor filestreams state data to avoid use-after-free bug
  - fix incorrect removal of cow extents when truncating extended attributes.
  - refactor open-coded __set_page_dirty in favor of using vfs function.
  - fix a deadlock when fstrim and fs shutdown race"

* tag 'xfs-4.17-merge-4' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
  xfs: Force log to disk before reading the AGF during a fstrim
  Export __set_page_dirty
  xfs: only cancel cow blocks when truncating the data fork
  xfs: non-scrub - remove unused function parameters
  xfs: remove filestream item xfs_inode reference
  xfs: fix intent use-after-free on abort
  xfs: Remove "committed" argument of xfs_dir_ialloc
commit 80aa76bcd3
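As background for the filestreams entry above, the sketch below illustrates the reworked MRU-cache callback contract that is visible in the diff: xfs_mru_cache_create() now records an opaque data pointer (the xfs_mount in the filestreams case) and passes it to the free callback, so a cached filestream item no longer has to hold an xfs_inode reference just to reach the mount. This is a minimal standalone sketch in plain C; the mock types and the main() harness are hypothetical scaffolding, and only the callback shape mirrors the kernel code in the diff that follows.

/* Standalone sketch of the new MRU-cache free-callback contract.
 * Mock types and main() are illustrative only, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct xfs_mru_cache_elem {
	unsigned long key;			/* e.g. the inode number */
};

/* New-style callback: receives the cache's private data pointer first. */
typedef void (*xfs_mru_cache_free_func_t)(void *, struct xfs_mru_cache_elem *);

struct xfs_mru_cache {
	xfs_mru_cache_free_func_t free_func;
	void *data;				/* registered at create time */
};

struct xfs_mount { const char *name; };	/* mock mount */

struct xfs_fstrm_item {
	struct xfs_mru_cache_elem mru;		/* first member, so a cast works */
	int ag;					/* AG in use; no inode pointer kept */
};

/* Mirrors the refactored xfs_fstrm_free_func(): the mount comes from the
 * data argument and the inode number from mru->key. */
static void xfs_fstrm_free_func(void *data, struct xfs_mru_cache_elem *mru)
{
	struct xfs_mount *mp = data;
	struct xfs_fstrm_item *item = (struct xfs_fstrm_item *)mru;

	printf("%s: free filestream for inode %lu, ag %d\n",
	       mp->name, mru->key, item->ag);
	free(item);
}

int main(void)
{
	struct xfs_mount mp = { "mockfs" };
	/* xfs_filestream_mount() now passes the mount as the cache data. */
	struct xfs_mru_cache cache = { xfs_fstrm_free_func, &mp };
	struct xfs_fstrm_item *item = malloc(sizeof(*item));

	if (!item)
		return 1;
	item->mru.key = 133;
	item->ag = 2;

	/* What the cache reap/delete paths do internally: */
	cache.free_func(cache.data, &item->mru);
	return 0;
}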
@@ -1947,7 +1947,7 @@ void
 xfs_alloc_compute_maxlevels(
 	xfs_mount_t	*mp)	/* file system mount structure */
 {
-	mp->m_ag_maxlevels = xfs_btree_compute_maxlevels(mp, mp->m_alloc_mnr,
+	mp->m_ag_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr,
 			(mp->m_sb.sb_agblocks + 1) / 2);
 }
 
@@ -1959,7 +1959,6 @@ xfs_alloc_compute_maxlevels(
  */
 xfs_extlen_t
 xfs_alloc_longest_free_extent(
-	struct xfs_mount	*mp,
 	struct xfs_perag	*pag,
 	xfs_extlen_t		need,
 	xfs_extlen_t		reserved)
@@ -2038,8 +2037,7 @@ xfs_alloc_space_available(
 
 	/* do we have enough contiguous free space for the allocation? */
 	alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
-	longest = xfs_alloc_longest_free_extent(args->mp, pag, min_free,
-			reservation);
+	longest = xfs_alloc_longest_free_extent(pag, min_free, reservation);
 	if (longest < alloc_len)
 		return false;
 
@@ -116,9 +116,8 @@ xfs_alloc_allow_busy_reuse(int datatype)
 unsigned int xfs_alloc_set_aside(struct xfs_mount *mp);
 unsigned int xfs_alloc_ag_max_usable(struct xfs_mount *mp);
 
-xfs_extlen_t xfs_alloc_longest_free_extent(struct xfs_mount *mp,
-		struct xfs_perag *pag, xfs_extlen_t need,
-		xfs_extlen_t reserved);
+xfs_extlen_t xfs_alloc_longest_free_extent(struct xfs_perag *pag,
+		xfs_extlen_t need, xfs_extlen_t reserved);
 unsigned int xfs_alloc_min_freelist(struct xfs_mount *mp,
 		struct xfs_perag *pag);
 
@@ -3225,7 +3225,7 @@ xfs_bmap_longest_free_extent(
 		}
 	}
 
-	longest = xfs_alloc_longest_free_extent(mp, pag,
+	longest = xfs_alloc_longest_free_extent(pag,
 			xfs_alloc_min_freelist(mp, pag),
 			xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
 	if (*blen < longest)
@@ -5667,7 +5667,6 @@ xfs_bmap_collapse_extents(
 	xfs_fileoff_t		*next_fsb,
 	xfs_fileoff_t		offset_shift_fsb,
 	bool			*done,
-	xfs_fileoff_t		stop_fsb,
 	xfs_fsblock_t		*firstblock,
 	struct xfs_defer_ops	*dfops)
 {
@@ -228,7 +228,7 @@ void xfs_bmap_del_extent_cow(struct xfs_inode *ip,
 uint xfs_default_attroffset(struct xfs_inode *ip);
 int xfs_bmap_collapse_extents(struct xfs_trans *tp, struct xfs_inode *ip,
 		xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
-		bool *done, xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock,
+		bool *done, xfs_fsblock_t *firstblock,
 		struct xfs_defer_ops *dfops);
 int xfs_bmap_insert_extents(struct xfs_trans *tp, struct xfs_inode *ip,
 		xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
@@ -4531,7 +4531,6 @@ xfs_btree_sblock_verify(
  */
 uint
 xfs_btree_compute_maxlevels(
-	struct xfs_mount	*mp,
 	uint			*limits,
 	unsigned long		len)
 {
@@ -4839,7 +4838,6 @@ xfs_btree_query_all(
  */
 xfs_extlen_t
 xfs_btree_calc_size(
-	struct xfs_mount	*mp,
 	uint			*limits,
 	unsigned long long	len)
 {
@@ -481,10 +481,8 @@ xfs_failaddr_t xfs_btree_lblock_v5hdr_verify(struct xfs_buf *bp,
 xfs_failaddr_t xfs_btree_lblock_verify(struct xfs_buf *bp,
 		unsigned int max_recs);
 
-uint xfs_btree_compute_maxlevels(struct xfs_mount *mp, uint *limits,
-		unsigned long len);
-xfs_extlen_t xfs_btree_calc_size(struct xfs_mount *mp, uint *limits,
-		unsigned long long len);
+uint xfs_btree_compute_maxlevels(uint *limits, unsigned long len);
+xfs_extlen_t xfs_btree_calc_size(uint *limits, unsigned long long len);
 
 /* return codes */
 #define XFS_BTREE_QUERY_RANGE_CONTINUE	0	/* keep iterating */
@@ -2406,7 +2406,7 @@ xfs_ialloc_compute_maxlevels(
 	uint		inodes;
 
 	inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG;
-	mp->m_in_maxlevels = xfs_btree_compute_maxlevels(mp, mp->m_inobt_mnr,
+	mp->m_in_maxlevels = xfs_btree_compute_maxlevels(mp->m_inobt_mnr,
 			inodes);
 }
 
@@ -556,7 +556,7 @@ xfs_inobt_max_size(
 	if (mp->m_inobt_mxr[0] == 0)
 		return 0;
 
-	return xfs_btree_calc_size(mp, mp->m_inobt_mnr,
+	return xfs_btree_calc_size(mp->m_inobt_mnr,
 		(uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock /
 				XFS_INODES_PER_CHUNK);
 }
@@ -351,7 +351,6 @@ xfs_refcount_merge_center_extents(
 	struct xfs_refcount_irec	*center,
 	struct xfs_refcount_irec	*right,
 	unsigned long long		extlen,
-	xfs_agblock_t			*agbno,
 	xfs_extlen_t			*aglen)
 {
 	int				error;
@@ -471,7 +470,6 @@ xfs_refcount_merge_right_extent(
 	struct xfs_btree_cur		*cur,
 	struct xfs_refcount_irec	*right,
 	struct xfs_refcount_irec	*cright,
-	xfs_agblock_t			*agbno,
 	xfs_extlen_t			*aglen)
 {
 	int				error;
@@ -749,7 +747,7 @@ xfs_refcount_merge_extents(
 	    ulen < MAXREFCEXTLEN) {
 		*shape_changed = true;
 		return xfs_refcount_merge_center_extents(cur, &left, &cleft,
-				&right, ulen, agbno, aglen);
+				&right, ulen, aglen);
 	}
 
 	/* Try to merge left and cleft. */
@@ -778,7 +776,7 @@ xfs_refcount_merge_extents(
 	    ulen < MAXREFCEXTLEN) {
 		*shape_changed = true;
 		return xfs_refcount_merge_right_extent(cur, &right, &cright,
-				agbno, aglen);
+				aglen);
 	}
 
 	return error;
@@ -1356,9 +1354,7 @@ xfs_refcount_adjust_cow_extents(
 	struct xfs_btree_cur	*cur,
 	xfs_agblock_t		agbno,
 	xfs_extlen_t		aglen,
-	enum xfs_refc_adjust_op	adj,
-	struct xfs_defer_ops	*dfops,
-	struct xfs_owner_info	*oinfo)
+	enum xfs_refc_adjust_op	adj)
 {
 	struct xfs_refcount_irec	ext, tmp;
 	int				error;
@@ -1437,8 +1433,7 @@ xfs_refcount_adjust_cow(
 	struct xfs_btree_cur	*cur,
 	xfs_agblock_t		agbno,
 	xfs_extlen_t		aglen,
-	enum xfs_refc_adjust_op	adj,
-	struct xfs_defer_ops	*dfops)
+	enum xfs_refc_adjust_op	adj)
 {
 	bool			shape_changed;
 	int			error;
@@ -1465,8 +1460,7 @@ xfs_refcount_adjust_cow(
 		goto out_error;
 
 	/* Now that we've taken care of the ends, adjust the middle extents */
-	error = xfs_refcount_adjust_cow_extents(cur, agbno, aglen, adj,
-			dfops, NULL);
+	error = xfs_refcount_adjust_cow_extents(cur, agbno, aglen, adj);
 	if (error)
 		goto out_error;
 
@@ -1493,7 +1487,7 @@ __xfs_refcount_cow_alloc(
 
 	/* Add refcount btree reservation */
 	return xfs_refcount_adjust_cow(rcur, agbno, aglen,
-			XFS_REFCOUNT_ADJUST_COW_ALLOC, dfops);
+			XFS_REFCOUNT_ADJUST_COW_ALLOC);
 }
 
 /*
@@ -1511,7 +1505,7 @@ __xfs_refcount_cow_free(
 
 	/* Remove refcount btree reservation */
 	return xfs_refcount_adjust_cow(rcur, agbno, aglen,
-			XFS_REFCOUNT_ADJUST_COW_FREE, dfops);
+			XFS_REFCOUNT_ADJUST_COW_FREE);
 }
 
 /* Record a CoW staging extent in the refcount btree. */
@@ -1568,7 +1562,7 @@ struct xfs_refcount_recovery {
 /* Stuff an extent on the recovery list. */
 STATIC int
 xfs_refcount_recover_extent(
-	struct xfs_btree_cur	*cur,
+	struct xfs_btree_cur	*cur,
 	union xfs_btree_rec	*rec,
 	void			*priv)
 {
@@ -373,7 +373,6 @@ xfs_refcountbt_init_cursor(
  */
 int
 xfs_refcountbt_maxrecs(
-	struct xfs_mount	*mp,
 	int			blocklen,
 	bool			leaf)
 {
@@ -390,7 +389,7 @@ void
 xfs_refcountbt_compute_maxlevels(
 	struct xfs_mount	*mp)
 {
-	mp->m_refc_maxlevels = xfs_btree_compute_maxlevels(mp,
+	mp->m_refc_maxlevels = xfs_btree_compute_maxlevels(
 			mp->m_refc_mnr, mp->m_sb.sb_agblocks);
 }
 
@@ -400,7 +399,7 @@ xfs_refcountbt_calc_size(
 	struct xfs_mount	*mp,
 	unsigned long long	len)
 {
-	return xfs_btree_calc_size(mp, mp->m_refc_mnr, len);
+	return xfs_btree_calc_size(mp->m_refc_mnr, len);
 }
 
 /*
@@ -60,8 +60,7 @@ struct xfs_mount;
 extern struct xfs_btree_cur *xfs_refcountbt_init_cursor(struct xfs_mount *mp,
 		struct xfs_trans *tp, struct xfs_buf *agbp, xfs_agnumber_t agno,
 		struct xfs_defer_ops *dfops);
-extern int xfs_refcountbt_maxrecs(struct xfs_mount *mp, int blocklen,
-		bool leaf);
+extern int xfs_refcountbt_maxrecs(int blocklen, bool leaf);
 extern void xfs_refcountbt_compute_maxlevels(struct xfs_mount *mp);
 
 extern xfs_extlen_t xfs_refcountbt_calc_size(struct xfs_mount *mp,
@@ -376,7 +376,6 @@ xfs_rmap_free_check_owner(
 	struct xfs_mount	*mp,
 	uint64_t		ltoff,
 	struct xfs_rmap_irec	*rec,
-	xfs_fsblock_t		bno,
 	xfs_filblks_t		len,
 	uint64_t		owner,
 	uint64_t		offset,
@@ -519,7 +518,7 @@ xfs_rmap_unmap(
 			bno + len, out_error);
 
 	/* Check owner information. */
-	error = xfs_rmap_free_check_owner(mp, ltoff, &ltrec, bno, len, owner,
+	error = xfs_rmap_free_check_owner(mp, ltoff, &ltrec, len, owner,
 			offset, flags);
 	if (error)
 		goto out_error;
@@ -499,7 +499,6 @@ xfs_rmapbt_init_cursor(
  */
 int
 xfs_rmapbt_maxrecs(
-	struct xfs_mount	*mp,
 	int			blocklen,
 	int			leaf)
 {
@@ -534,7 +533,7 @@ xfs_rmapbt_compute_maxlevels(
 	if (xfs_sb_version_hasreflink(&mp->m_sb))
 		mp->m_rmap_maxlevels = XFS_BTREE_MAXLEVELS;
 	else
-		mp->m_rmap_maxlevels = xfs_btree_compute_maxlevels(mp,
+		mp->m_rmap_maxlevels = xfs_btree_compute_maxlevels(
 				mp->m_rmap_mnr, mp->m_sb.sb_agblocks);
 }
 
@@ -544,7 +543,7 @@ xfs_rmapbt_calc_size(
 	struct xfs_mount	*mp,
 	unsigned long long	len)
 {
-	return xfs_btree_calc_size(mp, mp->m_rmap_mnr, len);
+	return xfs_btree_calc_size(mp->m_rmap_mnr, len);
 }
 
 /*
@@ -55,7 +55,7 @@ struct xfs_mount;
 struct xfs_btree_cur *xfs_rmapbt_init_cursor(struct xfs_mount *mp,
 		struct xfs_trans *tp, struct xfs_buf *bp,
 		xfs_agnumber_t agno);
-int xfs_rmapbt_maxrecs(struct xfs_mount *mp, int blocklen, int leaf);
+int xfs_rmapbt_maxrecs(int blocklen, int leaf);
 extern void xfs_rmapbt_compute_maxlevels(struct xfs_mount *mp);
 
 extern xfs_extlen_t xfs_rmapbt_calc_size(struct xfs_mount *mp,
@@ -756,15 +756,13 @@ xfs_sb_mount_common(
 	mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2;
 	mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2;
 
-	mp->m_rmap_mxr[0] = xfs_rmapbt_maxrecs(mp, sbp->sb_blocksize, 1);
-	mp->m_rmap_mxr[1] = xfs_rmapbt_maxrecs(mp, sbp->sb_blocksize, 0);
+	mp->m_rmap_mxr[0] = xfs_rmapbt_maxrecs(sbp->sb_blocksize, 1);
+	mp->m_rmap_mxr[1] = xfs_rmapbt_maxrecs(sbp->sb_blocksize, 0);
 	mp->m_rmap_mnr[0] = mp->m_rmap_mxr[0] / 2;
 	mp->m_rmap_mnr[1] = mp->m_rmap_mxr[1] / 2;
 
-	mp->m_refc_mxr[0] = xfs_refcountbt_maxrecs(mp, sbp->sb_blocksize,
-			true);
-	mp->m_refc_mxr[1] = xfs_refcountbt_maxrecs(mp, sbp->sb_blocksize,
-			false);
+	mp->m_refc_mxr[0] = xfs_refcountbt_maxrecs(sbp->sb_blocksize, true);
+	mp->m_refc_mxr[1] = xfs_refcountbt_maxrecs(sbp->sb_blocksize, false);
 	mp->m_refc_mnr[0] = mp->m_refc_mxr[0] / 2;
 	mp->m_refc_mnr[1] = mp->m_refc_mxr[1] / 2;
 
@@ -734,8 +734,7 @@ xfs_calc_clear_agi_bucket_reservation(
  * the xfs_disk_dquot_t: sizeof(struct xfs_disk_dquot)
  */
 STATIC uint
-xfs_calc_qm_setqlim_reservation(
-	struct xfs_mount	*mp)
+xfs_calc_qm_setqlim_reservation(void)
 {
 	return xfs_calc_buf_res(1, sizeof(struct xfs_disk_dquot));
 }
@@ -772,8 +771,7 @@ xfs_calc_qm_quotaoff_reservation(
  * the xfs_qoff_logitem_t: sizeof(struct xfs_qoff_logitem) * 2
  */
 STATIC uint
-xfs_calc_qm_quotaoff_end_reservation(
-	struct xfs_mount	*mp)
+xfs_calc_qm_quotaoff_end_reservation(void)
 {
 	return sizeof(struct xfs_qoff_logitem) * 2;
 }
@@ -877,14 +875,14 @@ xfs_trans_resv_calc(
 	 * The following transactions are logged in logical format with
 	 * a default log count.
 	 */
-	resp->tr_qm_setqlim.tr_logres = xfs_calc_qm_setqlim_reservation(mp);
+	resp->tr_qm_setqlim.tr_logres = xfs_calc_qm_setqlim_reservation();
 	resp->tr_qm_setqlim.tr_logcount = XFS_DEFAULT_LOG_COUNT;
 
 	resp->tr_qm_quotaoff.tr_logres = xfs_calc_qm_quotaoff_reservation(mp);
 	resp->tr_qm_quotaoff.tr_logcount = XFS_DEFAULT_LOG_COUNT;
 
 	resp->tr_qm_equotaoff.tr_logres =
-		xfs_calc_qm_quotaoff_end_reservation(mp);
+		xfs_calc_qm_quotaoff_end_reservation();
 	resp->tr_qm_equotaoff.tr_logcount = XFS_DEFAULT_LOG_COUNT;
 
 	resp->tr_sb.tr_logres = xfs_calc_sb_reservation(mp);
@@ -53,6 +53,25 @@ xfs_bui_item_free(
 	kmem_zone_free(xfs_bui_zone, buip);
 }
 
+/*
+ * Freeing the BUI requires that we remove it from the AIL if it has already
+ * been placed there. However, the BUI may not yet have been placed in the AIL
+ * when called by xfs_bui_release() from BUD processing due to the ordering of
+ * committed vs unpin operations in bulk insert operations. Hence the reference
+ * count to ensure only the last caller frees the BUI.
+ */
+void
+xfs_bui_release(
+	struct xfs_bui_log_item	*buip)
+{
+	ASSERT(atomic_read(&buip->bui_refcount) > 0);
+	if (atomic_dec_and_test(&buip->bui_refcount)) {
+		xfs_trans_ail_remove(&buip->bui_item, SHUTDOWN_LOG_IO_ERROR);
+		xfs_bui_item_free(buip);
+	}
+}
+
+
 STATIC void
 xfs_bui_item_size(
 	struct xfs_log_item	*lip,
@@ -142,7 +161,7 @@ xfs_bui_item_unlock(
 	struct xfs_log_item	*lip)
 {
 	if (lip->li_flags & XFS_LI_ABORTED)
-		xfs_bui_item_free(BUI_ITEM(lip));
+		xfs_bui_release(BUI_ITEM(lip));
 }
 
 /*
@@ -206,24 +225,6 @@ xfs_bui_init(
 	return buip;
 }
 
-/*
- * Freeing the BUI requires that we remove it from the AIL if it has already
- * been placed there. However, the BUI may not yet have been placed in the AIL
- * when called by xfs_bui_release() from BUD processing due to the ordering of
- * committed vs unpin operations in bulk insert operations. Hence the reference
- * count to ensure only the last caller frees the BUI.
- */
-void
-xfs_bui_release(
-	struct xfs_bui_log_item	*buip)
-{
-	ASSERT(atomic_read(&buip->bui_refcount) > 0);
-	if (atomic_dec_and_test(&buip->bui_refcount)) {
-		xfs_trans_ail_remove(&buip->bui_item, SHUTDOWN_LOG_IO_ERROR);
-		xfs_bui_item_free(buip);
-	}
-}
-
 static inline struct xfs_bud_log_item *BUD_ITEM(struct xfs_log_item *lip)
 {
 	return container_of(lip, struct xfs_bud_log_item, bud_item);
@@ -1326,7 +1326,6 @@ xfs_collapse_file_space(
 	int			error;
 	struct xfs_defer_ops	dfops;
 	xfs_fsblock_t		first_block;
-	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
 	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
 	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
 	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
@@ -1361,7 +1360,7 @@ xfs_collapse_file_space(
 
 		xfs_defer_init(&dfops, &first_block);
 		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
-				&done, stop_fsb, &first_block, &dfops);
+				&done, &first_block, &dfops);
 		if (error)
 			goto out_bmap_cancel;
 
@@ -1754,7 +1754,6 @@ xfs_buftarg_shrink_count(
 
 void
 xfs_free_buftarg(
-	struct xfs_mount	*mp,
 	struct xfs_buftarg	*btp)
 {
 	unregister_shrinker(&btp->bt_shrinker);
@@ -388,7 +388,7 @@ xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
  */
 extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
 			struct block_device *, struct dax_device *);
-extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
+extern void xfs_free_buftarg(struct xfs_buftarg *);
 extern void xfs_wait_buftarg(xfs_buftarg_t *);
 extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);
 
@@ -50,19 +50,19 @@ xfs_trim_extents(
 
 	pag = xfs_perag_get(mp, agno);
 
+	/*
+	 * Force out the log. This means any transactions that might have freed
+	 * space before we take the AGF buffer lock are now on disk, and the
+	 * volatile disk cache is flushed.
+	 */
+	xfs_log_force(mp, XFS_LOG_SYNC);
+
 	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
 	if (error || !agbp)
 		goto out_put_perag;
 
 	cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
 
-	/*
-	 * Force out the log. This means any transactions that might have freed
-	 * space before we took the AGF buffer lock are now on disk, and the
-	 * volatile disk cache is flushed.
-	 */
-	xfs_log_force(mp, XFS_LOG_SYNC);
-
 	/*
 	 * Look up the longest btree in the AGF and start with it.
 	 */
|
||||
kmem_zone_free(xfs_efi_zone, efip);
|
||||
}
|
||||
|
||||
/*
|
||||
* Freeing the efi requires that we remove it from the AIL if it has already
|
||||
* been placed there. However, the EFI may not yet have been placed in the AIL
|
||||
* when called by xfs_efi_release() from EFD processing due to the ordering of
|
||||
* committed vs unpin operations in bulk insert operations. Hence the reference
|
||||
* count to ensure only the last caller frees the EFI.
|
||||
*/
|
||||
void
|
||||
xfs_efi_release(
|
||||
struct xfs_efi_log_item *efip)
|
||||
{
|
||||
ASSERT(atomic_read(&efip->efi_refcount) > 0);
|
||||
if (atomic_dec_and_test(&efip->efi_refcount)) {
|
||||
xfs_trans_ail_remove(&efip->efi_item, SHUTDOWN_LOG_IO_ERROR);
|
||||
xfs_efi_item_free(efip);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* This returns the number of iovecs needed to log the given efi item.
|
||||
* We only need 1 iovec for an efi item. It just logs the efi_log_format
|
||||
@ -151,7 +169,7 @@ xfs_efi_item_unlock(
|
||||
struct xfs_log_item *lip)
|
||||
{
|
||||
if (lip->li_flags & XFS_LI_ABORTED)
|
||||
xfs_efi_item_free(EFI_ITEM(lip));
|
||||
xfs_efi_release(EFI_ITEM(lip));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -279,24 +297,6 @@ xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt)
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
/*
|
||||
* Freeing the efi requires that we remove it from the AIL if it has already
|
||||
* been placed there. However, the EFI may not yet have been placed in the AIL
|
||||
* when called by xfs_efi_release() from EFD processing due to the ordering of
|
||||
* committed vs unpin operations in bulk insert operations. Hence the reference
|
||||
* count to ensure only the last caller frees the EFI.
|
||||
*/
|
||||
void
|
||||
xfs_efi_release(
|
||||
struct xfs_efi_log_item *efip)
|
||||
{
|
||||
ASSERT(atomic_read(&efip->efi_refcount) > 0);
|
||||
if (atomic_dec_and_test(&efip->efi_refcount)) {
|
||||
xfs_trans_ail_remove(&efip->efi_item, SHUTDOWN_LOG_IO_ERROR);
|
||||
xfs_efi_item_free(efip);
|
||||
}
|
||||
}
|
||||
|
||||
static inline struct xfs_efd_log_item *EFD_ITEM(struct xfs_log_item *lip)
|
||||
{
|
||||
return container_of(lip, struct xfs_efd_log_item, efd_item);
|
||||
|
@@ -34,7 +34,6 @@
 
 struct xfs_fstrm_item {
 	struct xfs_mru_cache_elem	mru;
-	struct xfs_inode		*ip;
 	xfs_agnumber_t			ag; /* AG in use for this directory */
 };
 
@@ -122,14 +121,15 @@ xfs_filestream_put_ag(
 
 static void
 xfs_fstrm_free_func(
+	void			*data,
 	struct xfs_mru_cache_elem *mru)
 {
+	struct xfs_mount	*mp = data;
 	struct xfs_fstrm_item	*item =
 		container_of(mru, struct xfs_fstrm_item, mru);
 
-	xfs_filestream_put_ag(item->ip->i_mount, item->ag);
-
-	trace_xfs_filestream_free(item->ip, item->ag);
+	xfs_filestream_put_ag(mp, item->ag);
+	trace_xfs_filestream_free(mp, mru->key, item->ag);
 
 	kmem_free(item);
 }
@@ -165,7 +165,7 @@ xfs_filestream_pick_ag(
 		trylock = XFS_ALLOC_FLAG_TRYLOCK;
 
 	for (nscan = 0; 1; nscan++) {
-		trace_xfs_filestream_scan(ip, ag);
+		trace_xfs_filestream_scan(mp, ip->i_ino, ag);
 
 		pag = xfs_perag_get(mp, ag);
 
@@ -198,7 +198,7 @@ xfs_filestream_pick_ag(
 			goto next_ag;
 		}
 
-		longest = xfs_alloc_longest_free_extent(mp, pag,
+		longest = xfs_alloc_longest_free_extent(pag,
 				xfs_alloc_min_freelist(mp, pag),
 				xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
 		if (((minlen && longest >= minlen) ||
@@ -265,7 +265,6 @@ next_ag:
 		goto out_put_ag;
 
 	item->ag = *agp;
-	item->ip = ip;
 
 	err = xfs_mru_cache_insert(mp->m_filestream, ip->i_ino, &item->mru);
 	if (err) {
@@ -333,7 +332,7 @@ xfs_filestream_lookup_ag(
 		ag = container_of(mru, struct xfs_fstrm_item, mru)->ag;
 		xfs_mru_cache_done(mp->m_filestream);
 
-		trace_xfs_filestream_lookup(ip, ag);
+		trace_xfs_filestream_lookup(mp, ip->i_ino, ag);
 		goto out;
 	}
 
@@ -399,7 +398,7 @@ xfs_filestream_new_ag(
 	 * Only free the item here so we skip over the old AG earlier.
 	 */
 	if (mru)
-		xfs_fstrm_free_func(mru);
+		xfs_fstrm_free_func(mp, mru);
 
 	IRELE(pip);
 exit:
@@ -426,8 +425,8 @@ xfs_filestream_mount(
 	 * timer tunable to within about 10 percent. This requires at least 10
 	 * groups.
 	 */
-	return xfs_mru_cache_create(&mp->m_filestream, xfs_fstrm_centisecs * 10,
-			10, xfs_fstrm_free_func);
+	return xfs_mru_cache_create(&mp->m_filestream, mp,
+			xfs_fstrm_centisecs * 10, 10, xfs_fstrm_free_func);
 }
 
 void
@@ -972,10 +972,8 @@ xfs_dir_ialloc(
 	xfs_nlink_t	nlink,
 	dev_t		rdev,
 	prid_t		prid,		/* project id */
-	xfs_inode_t	**ipp,		/* pointer to inode; it will be
+	xfs_inode_t	**ipp)		/* pointer to inode; it will be
 					   locked. */
-	int		*committed)
-
 {
 	xfs_trans_t	*tp;
 	xfs_inode_t	*ip;
@@ -1050,8 +1048,6 @@ xfs_dir_ialloc(
 		}
 
 		code = xfs_trans_roll(&tp);
-		if (committed != NULL)
-			*committed = 1;
 
 		/*
 		 * Re-attach the quota info that we detached from prev trx.
@@ -1088,9 +1084,6 @@ xfs_dir_ialloc(
 		}
 		ASSERT(!ialloc_context && ip);
 
-	} else {
-		if (committed != NULL)
-			*committed = 0;
 	}
 
 	*ipp = ip;
@@ -1217,8 +1210,7 @@ xfs_create(
 	 * entry pointing to them, but a directory also the "." entry
 	 * pointing to itself.
 	 */
-	error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip,
-			NULL);
+	error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip);
 	if (error)
 		goto out_trans_cancel;
 
@@ -1309,7 +1301,6 @@ xfs_create(
 int
 xfs_create_tmpfile(
 	struct xfs_inode	*dp,
-	struct dentry		*dentry,
 	umode_t			mode,
 	struct xfs_inode	**ipp)
 {
@@ -1351,7 +1342,7 @@ xfs_create_tmpfile(
 	if (error)
 		goto out_trans_cancel;
 
-	error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, prid, &ip, NULL);
+	error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, prid, &ip);
 	if (error)
 		goto out_trans_cancel;
 
@@ -1611,13 +1602,15 @@ xfs_itruncate_extents(
 		goto out;
 	}
 
-	/* Remove all pending CoW reservations. */
-	error = xfs_reflink_cancel_cow_blocks(ip, &tp, first_unmap_block,
-			last_block, true);
-	if (error)
-		goto out;
+	if (whichfork == XFS_DATA_FORK) {
+		/* Remove all pending CoW reservations. */
+		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
+				first_unmap_block, last_block, true);
+		if (error)
+			goto out;
 
-	xfs_itruncate_clear_reflink_flags(ip);
+		xfs_itruncate_clear_reflink_flags(ip);
+	}
 
 	/*
 	 * Always re-log the inode so that our permanent transaction can keep
@@ -2903,7 +2896,7 @@ xfs_rename_alloc_whiteout(
 	struct xfs_inode	*tmpfile;
 	int			error;
 
-	error = xfs_create_tmpfile(dp, NULL, S_IFCHR | WHITEOUT_MODE, &tmpfile);
+	error = xfs_create_tmpfile(dp, S_IFCHR | WHITEOUT_MODE, &tmpfile);
 	if (error)
 		return error;
 
@@ -393,8 +393,8 @@ int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name,
 		struct xfs_inode **ipp, struct xfs_name *ci_name);
 int xfs_create(struct xfs_inode *dp, struct xfs_name *name,
 		umode_t mode, dev_t rdev, struct xfs_inode **ipp);
-int xfs_create_tmpfile(struct xfs_inode *dp, struct dentry *dentry,
-		umode_t mode, struct xfs_inode **ipp);
+int xfs_create_tmpfile(struct xfs_inode *dp, umode_t mode,
+		struct xfs_inode **ipp);
 int xfs_remove(struct xfs_inode *dp, struct xfs_name *name,
 		struct xfs_inode *ip);
 int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip,
@@ -431,7 +431,7 @@ xfs_extlen_t xfs_get_cowextsz_hint(struct xfs_inode *ip);
 
 int xfs_dir_ialloc(struct xfs_trans **, struct xfs_inode *, umode_t,
 		xfs_nlink_t, dev_t, prid_t,
-		struct xfs_inode **, int *);
+		struct xfs_inode **);
 
 /* from xfs_file.c */
 enum xfs_prealloc_flags {
@@ -177,7 +177,7 @@ xfs_generic_create(
 	if (!tmpfile) {
 		error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
 	} else {
-		error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip);
+		error = xfs_create_tmpfile(XFS_I(dir), mode, &ip);
 	}
 	if (unlikely(error))
 		goto out_free_acl;
@@ -560,7 +560,6 @@ xfs_log_done(
  */
 int
 xfs_log_notify(
-	struct xfs_mount	*mp,
 	struct xlog_in_core	*iclog,
 	xfs_log_callback_t	*cb)
 {
@@ -141,8 +141,7 @@ int xfs_log_mount_cancel(struct xfs_mount *);
 xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
 xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
 void xfs_log_space_wake(struct xfs_mount *mp);
-int xfs_log_notify(struct xfs_mount *mp,
-		struct xlog_in_core *iclog,
+int xfs_log_notify(struct xlog_in_core *iclog,
 		struct xfs_log_callback *callback_entry);
 int xfs_log_release_iclog(struct xfs_mount *mp,
 		struct xlog_in_core *iclog);
@@ -848,7 +848,7 @@ restart:
 	/* attach all the transactions w/ busy extents to iclog */
 	ctx->log_cb.cb_func = xlog_cil_committed;
 	ctx->log_cb.cb_arg = ctx;
-	error = xfs_log_notify(log->l_mp, commit_iclog, &ctx->log_cb);
+	error = xfs_log_notify(commit_iclog, &ctx->log_cb);
 	if (error)
 		goto out_abort;
 
@@ -112,6 +112,7 @@ struct xfs_mru_cache {
 	xfs_mru_cache_free_func_t free_func; /* Function pointer for freeing. */
 	struct delayed_work	work;	/* Workqueue data for reaping. */
 	unsigned int		queued;	/* work has been queued */
+	void			*data;
 };
 
 static struct workqueue_struct	*xfs_mru_reap_wq;
@@ -259,7 +260,7 @@ _xfs_mru_cache_clear_reap_list(
 
 	list_for_each_entry_safe(elem, next, &tmp, list_node) {
 		list_del_init(&elem->list_node);
-		mru->free_func(elem);
+		mru->free_func(mru->data, elem);
 	}
 
 	spin_lock(&mru->lock);
@@ -326,6 +327,7 @@ xfs_mru_cache_uninit(void)
 int
 xfs_mru_cache_create(
 	struct xfs_mru_cache	**mrup,
+	void			*data,
 	unsigned int		lifetime_ms,
 	unsigned int		grp_count,
 	xfs_mru_cache_free_func_t free_func)
@@ -369,7 +371,7 @@ xfs_mru_cache_create(
 
 	mru->grp_time  = grp_time;
 	mru->free_func = free_func;
-
+	mru->data = data;
 	*mrup = mru;
 
 exit:
@@ -492,7 +494,7 @@ xfs_mru_cache_delete(
 
 	elem = xfs_mru_cache_remove(mru, key);
 	if (elem)
-		mru->free_func(elem);
+		mru->free_func(mru->data, elem);
 }
 
 /*
@@ -26,13 +26,13 @@ struct xfs_mru_cache_elem {
 };
 
 /* Function pointer type for callback to free a client's data pointer. */
-typedef void (*xfs_mru_cache_free_func_t)(struct xfs_mru_cache_elem *elem);
+typedef void (*xfs_mru_cache_free_func_t)(void *, struct xfs_mru_cache_elem *);
 
 int xfs_mru_cache_init(void);
 void xfs_mru_cache_uninit(void);
-int xfs_mru_cache_create(struct xfs_mru_cache **mrup, unsigned int lifetime_ms,
-		unsigned int grp_count,
-		xfs_mru_cache_free_func_t free_func);
+int xfs_mru_cache_create(struct xfs_mru_cache **mrup, void *data,
+		unsigned int lifetime_ms, unsigned int grp_count,
+		xfs_mru_cache_free_func_t free_func);
 void xfs_mru_cache_destroy(struct xfs_mru_cache *mru);
 int xfs_mru_cache_insert(struct xfs_mru_cache *mru, unsigned long key,
 		struct xfs_mru_cache_elem *elem);
@@ -748,7 +748,6 @@ xfs_qm_qino_alloc(
 {
 	xfs_trans_t	*tp;
 	int		error;
-	int		committed;
 	bool		need_alloc = true;
 
 	*ip = NULL;
@@ -788,8 +787,7 @@ xfs_qm_qino_alloc(
 		return error;
 
 	if (need_alloc) {
-		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip,
-				&committed);
+		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip);
 		if (error) {
 			xfs_trans_cancel(tp);
 			return error;
@@ -52,6 +52,25 @@ xfs_cui_item_free(
 	kmem_zone_free(xfs_cui_zone, cuip);
 }
 
+/*
+ * Freeing the CUI requires that we remove it from the AIL if it has already
+ * been placed there. However, the CUI may not yet have been placed in the AIL
+ * when called by xfs_cui_release() from CUD processing due to the ordering of
+ * committed vs unpin operations in bulk insert operations. Hence the reference
+ * count to ensure only the last caller frees the CUI.
+ */
+void
+xfs_cui_release(
+	struct xfs_cui_log_item	*cuip)
+{
+	ASSERT(atomic_read(&cuip->cui_refcount) > 0);
+	if (atomic_dec_and_test(&cuip->cui_refcount)) {
+		xfs_trans_ail_remove(&cuip->cui_item, SHUTDOWN_LOG_IO_ERROR);
+		xfs_cui_item_free(cuip);
+	}
+}
+
+
 STATIC void
 xfs_cui_item_size(
 	struct xfs_log_item	*lip,
@@ -141,7 +160,7 @@ xfs_cui_item_unlock(
 	struct xfs_log_item	*lip)
 {
 	if (lip->li_flags & XFS_LI_ABORTED)
-		xfs_cui_item_free(CUI_ITEM(lip));
+		xfs_cui_release(CUI_ITEM(lip));
 }
 
 /*
@@ -211,24 +230,6 @@ xfs_cui_init(
 	return cuip;
 }
 
-/*
- * Freeing the CUI requires that we remove it from the AIL if it has already
- * been placed there. However, the CUI may not yet have been placed in the AIL
- * when called by xfs_cui_release() from CUD processing due to the ordering of
- * committed vs unpin operations in bulk insert operations. Hence the reference
- * count to ensure only the last caller frees the CUI.
- */
-void
-xfs_cui_release(
-	struct xfs_cui_log_item	*cuip)
-{
-	ASSERT(atomic_read(&cuip->cui_refcount) > 0);
-	if (atomic_dec_and_test(&cuip->cui_refcount)) {
-		xfs_trans_ail_remove(&cuip->cui_item, SHUTDOWN_LOG_IO_ERROR);
-		xfs_cui_item_free(cuip);
-	}
-}
-
 static inline struct xfs_cud_log_item *CUD_ITEM(struct xfs_log_item *lip)
 {
 	return container_of(lip, struct xfs_cud_log_item, cud_item);
@@ -52,6 +52,24 @@ xfs_rui_item_free(
 	kmem_zone_free(xfs_rui_zone, ruip);
 }
 
+/*
+ * Freeing the RUI requires that we remove it from the AIL if it has already
+ * been placed there. However, the RUI may not yet have been placed in the AIL
+ * when called by xfs_rui_release() from RUD processing due to the ordering of
+ * committed vs unpin operations in bulk insert operations. Hence the reference
+ * count to ensure only the last caller frees the RUI.
+ */
+void
+xfs_rui_release(
+	struct xfs_rui_log_item	*ruip)
+{
+	ASSERT(atomic_read(&ruip->rui_refcount) > 0);
+	if (atomic_dec_and_test(&ruip->rui_refcount)) {
+		xfs_trans_ail_remove(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR);
+		xfs_rui_item_free(ruip);
+	}
+}
+
 STATIC void
 xfs_rui_item_size(
 	struct xfs_log_item	*lip,
@@ -141,7 +159,7 @@ xfs_rui_item_unlock(
 	struct xfs_log_item	*lip)
 {
 	if (lip->li_flags & XFS_LI_ABORTED)
-		xfs_rui_item_free(RUI_ITEM(lip));
+		xfs_rui_release(RUI_ITEM(lip));
 }
 
 /*
@@ -233,24 +251,6 @@ xfs_rui_copy_format(
 	return 0;
 }
 
-/*
- * Freeing the RUI requires that we remove it from the AIL if it has already
- * been placed there. However, the RUI may not yet have been placed in the AIL
- * when called by xfs_rui_release() from RUD processing due to the ordering of
- * committed vs unpin operations in bulk insert operations. Hence the reference
- * count to ensure only the last caller frees the RUI.
- */
-void
-xfs_rui_release(
-	struct xfs_rui_log_item	*ruip)
-{
-	ASSERT(atomic_read(&ruip->rui_refcount) > 0);
-	if (atomic_dec_and_test(&ruip->rui_refcount)) {
-		xfs_trans_ail_remove(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR);
-		xfs_rui_item_free(ruip);
-	}
-}
-
 static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
 {
 	return container_of(lip, struct xfs_rud_log_item, rud_item);
@@ -722,7 +722,7 @@ xfs_close_devices(
 		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
 		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;
 
-		xfs_free_buftarg(mp, mp->m_logdev_targp);
+		xfs_free_buftarg(mp->m_logdev_targp);
 		xfs_blkdev_put(logdev);
 		fs_put_dax(dax_logdev);
 	}
@@ -730,11 +730,11 @@ xfs_close_devices(
 		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
 		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;
 
-		xfs_free_buftarg(mp, mp->m_rtdev_targp);
+		xfs_free_buftarg(mp->m_rtdev_targp);
 		xfs_blkdev_put(rtdev);
 		fs_put_dax(dax_rtdev);
 	}
-	xfs_free_buftarg(mp, mp->m_ddev_targp);
+	xfs_free_buftarg(mp->m_ddev_targp);
 	fs_put_dax(dax_ddev);
 }
 
@@ -808,9 +808,9 @@ xfs_open_devices(
 
  out_free_rtdev_targ:
 	if (mp->m_rtdev_targp)
-		xfs_free_buftarg(mp, mp->m_rtdev_targp);
+		xfs_free_buftarg(mp->m_rtdev_targp);
  out_free_ddev_targ:
-	xfs_free_buftarg(mp, mp->m_ddev_targp);
+	xfs_free_buftarg(mp->m_ddev_targp);
  out_close_rtdev:
 	xfs_blkdev_put(rtdev);
 	fs_put_dax(dax_rtdev);
@@ -1247,7 +1247,6 @@ xfs_quiesce_attr(
 STATIC int
 xfs_test_remount_options(
 	struct super_block	*sb,
-	struct xfs_mount	*mp,
 	char			*options)
 {
 	int			error = 0;
@@ -1278,7 +1277,7 @@ xfs_fs_remount(
 	int			error;
 
 	/* First, check for complete junk; i.e. invalid options */
-	error = xfs_test_remount_options(sb, mp, options);
+	error = xfs_test_remount_options(sb, options);
 	if (error)
 		return error;
 
@@ -264,7 +264,7 @@ xfs_symlink(
 	 * Allocate an inode for the symlink.
 	 */
 	error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0,
-			prid, &ip, NULL);
+			prid, &ip);
 	if (error)
 		goto out_trans_cancel;
 
@@ -506,8 +506,8 @@ DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release);
 DEFINE_BUF_ITEM_EVENT(xfs_trans_binval);
 
 DECLARE_EVENT_CLASS(xfs_filestream_class,
-	TP_PROTO(struct xfs_inode *ip, xfs_agnumber_t agno),
-	TP_ARGS(ip, agno),
+	TP_PROTO(struct xfs_mount *mp, xfs_ino_t ino, xfs_agnumber_t agno),
+	TP_ARGS(mp, ino, agno),
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
 		__field(xfs_ino_t, ino)
@@ -515,10 +515,10 @@ DECLARE_EVENT_CLASS(xfs_filestream_class,
 		__field(int, streams)
 	),
 	TP_fast_assign(
-		__entry->dev = VFS_I(ip)->i_sb->s_dev;
-		__entry->ino = ip->i_ino;
+		__entry->dev = mp->m_super->s_dev;
+		__entry->ino = ino;
 		__entry->agno = agno;
-		__entry->streams = xfs_filestream_peek_ag(ip->i_mount, agno);
+		__entry->streams = xfs_filestream_peek_ag(mp, agno);
 	),
 	TP_printk("dev %d:%d ino 0x%llx agno %u streams %d",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
@@ -528,8 +528,8 @@ DECLARE_EVENT_CLASS(xfs_filestream_class,
 )
 #define DEFINE_FILESTREAM_EVENT(name) \
 DEFINE_EVENT(xfs_filestream_class, name, \
-	TP_PROTO(struct xfs_inode *ip, xfs_agnumber_t agno), \
-	TP_ARGS(ip, agno))
+	TP_PROTO(struct xfs_mount *mp, xfs_ino_t ino, xfs_agnumber_t agno), \
+	TP_ARGS(mp, ino, agno))
 DEFINE_FILESTREAM_EVENT(xfs_filestream_free);
 DEFINE_FILESTREAM_EVENT(xfs_filestream_lookup);
 DEFINE_FILESTREAM_EVENT(xfs_filestream_scan);