// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_STATS_H__
#define __XFS_STATS_H__

#include <linux/percpu.h>

/*
 * The btree stats arrays have fixed offsets for the different stats. We
 * store the base index in the btree cursor via XFS_STATS_CALC_INDEX() and
 * that allows us to use fixed offsets into the stats array for each btree
 * stat. These index offsets are defined in the order they will be emitted
 * in the stats files, so it is possible to add new btree stat types by
 * appending to the enum list below.
 */
enum {
	__XBTS_lookup = 0,
	__XBTS_compare = 1,
	__XBTS_insrec = 2,
	__XBTS_delrec = 3,
	__XBTS_newroot = 4,
	__XBTS_killroot = 5,
	__XBTS_increment = 6,
	__XBTS_decrement = 7,
	__XBTS_lshift = 8,
	__XBTS_rshift = 9,
	__XBTS_split = 10,
	__XBTS_join = 11,
	__XBTS_alloc = 12,
	__XBTS_free = 13,
	__XBTS_moves = 14,

	__XBTS_MAX = 15,
};
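
/*
 * For illustration: a btree whose counters live in xs_abtb_2[] below would
 * use XFS_STATS_CALC_INDEX(xs_abtb_2) as the base index stored in its
 * cursor, and its split counter then sits at (base index + __XBTS_split)
 * in the flat stats array.
 */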

/*
 * XFS global statistics
 */
struct __xfsstats {
	uint32_t		xs_allocx;
	uint32_t		xs_allocb;
	uint32_t		xs_freex;
	uint32_t		xs_freeb;
	uint32_t		xs_abt_lookup;
	uint32_t		xs_abt_compare;
	uint32_t		xs_abt_insrec;
	uint32_t		xs_abt_delrec;
	uint32_t		xs_blk_mapr;
	uint32_t		xs_blk_mapw;
	uint32_t		xs_blk_unmap;
	uint32_t		xs_add_exlist;
	uint32_t		xs_del_exlist;
	uint32_t		xs_look_exlist;
	uint32_t		xs_cmp_exlist;
	uint32_t		xs_bmbt_lookup;
	uint32_t		xs_bmbt_compare;
	uint32_t		xs_bmbt_insrec;
	uint32_t		xs_bmbt_delrec;
	uint32_t		xs_dir_lookup;
	uint32_t		xs_dir_create;
	uint32_t		xs_dir_remove;
	uint32_t		xs_dir_getdents;
	uint32_t		xs_trans_sync;
	uint32_t		xs_trans_async;
	uint32_t		xs_trans_empty;
	uint32_t		xs_ig_attempts;
	uint32_t		xs_ig_found;
	uint32_t		xs_ig_frecycle;
	uint32_t		xs_ig_missed;
	uint32_t		xs_ig_dup;
	uint32_t		xs_ig_reclaims;
	uint32_t		xs_ig_attrchg;
	uint32_t		xs_log_writes;
	uint32_t		xs_log_blocks;
	uint32_t		xs_log_noiclogs;
	uint32_t		xs_log_force;
	uint32_t		xs_log_force_sleep;
	uint32_t		xs_try_logspace;
	uint32_t		xs_sleep_logspace;
	uint32_t		xs_push_ail;
	uint32_t		xs_push_ail_success;
	uint32_t		xs_push_ail_pushbuf;
	uint32_t		xs_push_ail_pinned;
	uint32_t		xs_push_ail_locked;
	uint32_t		xs_push_ail_flushing;
	uint32_t		xs_push_ail_restarts;
	uint32_t		xs_push_ail_flush;
	uint32_t		xs_xstrat_quick;
	uint32_t		xs_xstrat_split;
	uint32_t		xs_write_calls;
	uint32_t		xs_read_calls;
	uint32_t		xs_attr_get;
	uint32_t		xs_attr_set;
	uint32_t		xs_attr_remove;
	uint32_t		xs_attr_list;
	uint32_t		xs_iflush_count;
	uint32_t		xs_icluster_flushcnt;
	uint32_t		xs_icluster_flushinode;
	uint32_t		vn_active;	/* # vnodes not on free lists */
	uint32_t		vn_alloc;	/* # times vn_alloc called */
	uint32_t		vn_get;		/* # times vn_get called */
	uint32_t		vn_hold;	/* # times vn_hold called */
	uint32_t		vn_rele;	/* # times vn_rele called */
	uint32_t		vn_reclaim;	/* # times vn_reclaim called */
	uint32_t		vn_remove;	/* # times vn_remove called */
	uint32_t		vn_free;	/* # times vn_free called */
	uint32_t		xb_get;
	uint32_t		xb_create;
	uint32_t		xb_get_locked;
	uint32_t		xb_get_locked_waited;
	uint32_t		xb_busy_locked;
	uint32_t		xb_miss_locked;
	uint32_t		xb_page_retries;
	uint32_t		xb_page_found;
	uint32_t		xb_get_read;
/* Version 2 btree counters */
	uint32_t		xs_abtb_2[__XBTS_MAX];
	uint32_t		xs_abtc_2[__XBTS_MAX];
	uint32_t		xs_bmbt_2[__XBTS_MAX];
	uint32_t		xs_ibt_2[__XBTS_MAX];
	uint32_t		xs_fibt_2[__XBTS_MAX];
	uint32_t		xs_rmap_2[__XBTS_MAX];
	uint32_t		xs_refcbt_2[__XBTS_MAX];
	uint32_t		xs_rmap_mem_2[__XBTS_MAX];
	uint32_t		xs_rcbag_2[__XBTS_MAX];
	uint32_t		xs_qm_dqreclaims;
	uint32_t		xs_qm_dqreclaim_misses;
	uint32_t		xs_qm_dquot_dups;
	uint32_t		xs_qm_dqcachemisses;
	uint32_t		xs_qm_dqcachehits;
	uint32_t		xs_qm_dqwants;
	uint32_t		xs_qm_dquot;
	uint32_t		xs_qm_dquot_unused;
/* Extra precision counters */
	uint64_t		xs_xstrat_bytes;
	uint64_t		xs_write_bytes;
	uint64_t		xs_read_bytes;
	uint64_t		defer_relog;
};
#define xfsstats_offset(f) (offsetof(struct __xfsstats, f)/sizeof(uint32_t))
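/*
 * For example, xs_allocx is the first uint32_t member of struct __xfsstats,
 * so xfsstats_offset(xs_allocx) == 0 and xfsstats_offset(xs_freex) == 2.
 */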

struct xfsstats {
	union {
		struct __xfsstats	s;
		uint32_t		a[xfsstats_offset(xs_qm_dquot)];
	};
};
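
/*
 * The union gives two views of the same per-cpu storage: a counter can be
 * updated by field name through 's' (see XFS_STATS_INC() below) or by flat
 * uint32_t index through 'a' (see XFS_STATS_INC_OFF() below). For
 * illustration, these two statements touch the same counter:
 *
 *	stats->s.xs_allocx++;
 *	stats->a[XFS_STATS_CALC_INDEX(xs_allocx)]++;
 */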

/*
 * Simple wrapper for computing the flat-array index of a struct member
 * from its byte offset.
 */
#define XFS_STATS_CALC_INDEX(member)	\
		(offsetof(struct __xfsstats, member) / (int)sizeof(uint32_t))

int xfs_stats_format(struct xfsstats __percpu *stats, char *buf);
void xfs_stats_clearall(struct xfsstats __percpu *stats);
extern struct xstats xfsstats;

#define XFS_STATS_INC(mp, v)					\
do {								\
	per_cpu_ptr(xfsstats.xs_stats, current_cpu())->s.v++;	\
	per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->s.v++;	\
} while (0)

#define XFS_STATS_DEC(mp, v)					\
do {								\
	per_cpu_ptr(xfsstats.xs_stats, current_cpu())->s.v--;	\
	per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->s.v--;	\
} while (0)

#define XFS_STATS_ADD(mp, v, inc)					\
do {									\
	per_cpu_ptr(xfsstats.xs_stats, current_cpu())->s.v += (inc);	\
	per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->s.v += (inc);	\
} while (0)
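
/*
 * Usage example (illustrative; 'mp' is the mount's struct xfs_mount pointer
 * and 'blocks' stands in for whatever count the caller has at hand):
 *
 *	XFS_STATS_INC(mp, xs_allocx);
 *	XFS_STATS_ADD(mp, xs_allocb, blocks);
 *
 * Each macro bumps both the global and the per-mount per-cpu counter.
 */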

#define XFS_STATS_INC_OFF(mp, off) \
do { \
	per_cpu_ptr(xfsstats.xs_stats, current_cpu())->a[off]++;	\
	per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->a[off]++;	\
} while (0)

#define XFS_STATS_DEC_OFF(mp, off) \
do { \
	per_cpu_ptr(xfsstats.xs_stats, current_cpu())->a[off]--;	\
	per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->a[off]--;	\
} while (0)

#define XFS_STATS_ADD_OFF(mp, off, inc) \
do { \
	per_cpu_ptr(xfsstats.xs_stats, current_cpu())->a[off] += (inc);	\
	per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->a[off] += (inc);	\
} while (0)
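
/*
 * Usage example (illustrative): the btree code combines a per-cursor base
 * index with the __XBTS_* offsets defined above, e.g.
 *
 *	XFS_STATS_INC_OFF(mp, XFS_STATS_CALC_INDEX(xs_abtb_2) + __XBTS_lookup);
 *
 * bumps the lookup counter of the by-block free space btree.
 */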
#if defined(CONFIG_PROC_FS)

extern int xfs_init_procfs(void);
extern void xfs_cleanup_procfs(void);

#else /* !CONFIG_PROC_FS */

static inline int xfs_init_procfs(void)
{
	return 0;
}

static inline void xfs_cleanup_procfs(void)
{
}

#endif	/* !CONFIG_PROC_FS */

#endif	/* __XFS_STATS_H__ */