// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"

struct xstats xfsstats;
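
/*
 * Sum one statistics counter across all possible CPUs.  Each per-cpu
 * copy of struct xfsstats is treated as a flat array of __u32 counters,
 * so @idx is a word offset into the structure (e.g. a value produced by
 * xfsstats_offset()).
 */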
static int counter_val(struct xfsstats __percpu *stats, int idx)
{
	int val = 0, cpu;

	for_each_possible_cpu(cpu)
		val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx));
	return val;
}
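
/*
 * Format the statistics into @buf: one line per stats group (the group
 * name followed by its counters), then the extra-precision byte
 * counters and the debug flag.  A rough sketch of the output follows;
 * the counter values below are invented purely for illustration:
 *
 *	extent_alloc 12 345 6 789
 *	abt 0 0 0 0
 *	...
 *	xpc 1048576 524288 262144
 *	defer_relog 0
 *	debug 0
 */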
int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
{
	int		i, j;
	int		len = 0;
	uint64_t	xs_xstrat_bytes = 0;
	uint64_t	xs_write_bytes = 0;
	uint64_t	xs_read_bytes = 0;
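	/* deferred intent items relogged to keep the log tail moving */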
	uint64_t	defer_relog = 0;
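
	/*
	 * Each entry below names a stats group and records where that
	 * group ends: @endpoint is the __u32 offset of the first counter
	 * belonging to the *next* group.  The formatting loop walks
	 * group i over the half-open range
	 * [endpoint of group i-1, endpoint of group i).
	 */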
	static const struct xstats_entry {
		char	*desc;
		int	endpoint;
	} xstats[] = {
		{ "extent_alloc",	xfsstats_offset(xs_abt_lookup)	},
		{ "abt",		xfsstats_offset(xs_blk_mapr)	},
		{ "blk_map",		xfsstats_offset(xs_bmbt_lookup)	},
		{ "bmbt",		xfsstats_offset(xs_dir_lookup)	},
		{ "dir",		xfsstats_offset(xs_trans_sync)	},
		{ "trans",		xfsstats_offset(xs_ig_attempts)	},
		{ "ig",			xfsstats_offset(xs_log_writes)	},
		{ "log",		xfsstats_offset(xs_try_logspace)},
		{ "push_ail",		xfsstats_offset(xs_xstrat_quick)},
		{ "xstrat",		xfsstats_offset(xs_write_calls)	},
		{ "rw",			xfsstats_offset(xs_attr_get)	},
		{ "attr",		xfsstats_offset(xs_iflush_count)},
		{ "icluster",		xfsstats_offset(vn_active)	},
		{ "vnodes",		xfsstats_offset(xb_get)		},
		{ "buf",		xfsstats_offset(xs_abtb_2)	},
		{ "abtb2",		xfsstats_offset(xs_abtc_2)	},
		{ "abtc2",		xfsstats_offset(xs_bmbt_2)	},
		{ "bmbt2",		xfsstats_offset(xs_ibt_2)	},
		{ "ibt2",		xfsstats_offset(xs_fibt_2)	},
		{ "fibt2",		xfsstats_offset(xs_rmap_2)	},
		{ "rmapbt",		xfsstats_offset(xs_refcbt_2)	},
		{ "refcntbt",		xfsstats_offset(xs_rmap_mem_2)	},
{ "rmapbt_mem", xfsstats_offset(xs_rcbag_2) },
|
|
|
|
{ "rcbagbt", xfsstats_offset(xs_qm_dqreclaims)},
|
2015-10-11 18:15:45 +00:00
|
|
|
/* we print both series of quota information together */
|
2018-10-18 06:21:39 +00:00
|
|
|
{ "qm", xfsstats_offset(xs_xstrat_bytes)},
|
2015-10-11 18:15:45 +00:00
|
|
|
};

	/* Loop over all stats groups */

	for (i = j = 0; i < ARRAY_SIZE(xstats); i++) {
		len += scnprintf(buf + len, PATH_MAX - len, "%s",
				xstats[i].desc);
		/* inner loop does each group */
		for (; j < xstats[i].endpoint; j++)
			len += scnprintf(buf + len, PATH_MAX - len, " %u",
					counter_val(stats, j));
		len += scnprintf(buf + len, PATH_MAX - len, "\n");
	}
	/* extra precision counters */
	for_each_possible_cpu(i) {
		xs_xstrat_bytes += per_cpu_ptr(stats, i)->s.xs_xstrat_bytes;
		xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes;
		xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;
		defer_relog += per_cpu_ptr(stats, i)->s.defer_relog;
	}

	len += scnprintf(buf + len, PATH_MAX - len, "xpc %llu %llu %llu\n",
			xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
	len += scnprintf(buf + len, PATH_MAX - len, "defer_relog %llu\n",
			defer_relog);
	len += scnprintf(buf + len, PATH_MAX - len, "debug %u\n",
#if defined(DEBUG)
		1);
#else
		0);
#endif

	return len;
}
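
/*
 * Zero all statistics.  vn_active is saved and restored around the
 * clear: it counts vnodes that are currently active rather than an
 * accumulated event total, so it must survive a reset.
 */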
void xfs_stats_clearall(struct xfsstats __percpu *stats)
{
	int		c;
	uint32_t	vn_active;

	xfs_notice(NULL, "Clearing xfsstats");
	for_each_possible_cpu(c) {
		preempt_disable();
		/* save vn_active, it's a universal truth! */
		vn_active = per_cpu_ptr(stats, c)->s.vn_active;
		memset(per_cpu_ptr(stats, c), 0, sizeof(*stats));
		per_cpu_ptr(stats, c)->s.vn_active = vn_active;
		preempt_enable();
	}
}

#ifdef CONFIG_PROC_FS
/* legacy quota interfaces */
#ifdef CONFIG_XFS_QUOTA

#define XFSSTAT_START_XQMSTAT	xfsstats_offset(xs_qm_dqreclaims)
#define XFSSTAT_END_XQMSTAT	xfsstats_offset(xs_qm_dquot)

static int xqm_proc_show(struct seq_file *m, void *v)
{
	/* maximum; incore; ratio free to inuse; freelist */
	seq_printf(m, "%d\t%d\t%d\t%d\n",
		   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT),
		   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT + 1));
	return 0;
}

/* legacy quota stats interface no 2 */
static int xqmstat_proc_show(struct seq_file *m, void *v)
{
	int j;

	seq_puts(m, "qm");
	for (j = XFSSTAT_START_XQMSTAT; j < XFSSTAT_END_XQMSTAT; j++)
		seq_printf(m, " %u", counter_val(xfsstats.xs_stats, j));
	seq_putc(m, '\n');
	return 0;
}
#endif /* CONFIG_XFS_QUOTA */
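
/*
 * Create the legacy /proc/fs/xfs hierarchy.  The global stats file
 * moved to sysfs, so /proc/fs/xfs/stat is provided as a symlink to
 * /sys/fs/xfs/stats/stats for the benefit of old tooling.
 */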
int
xfs_init_procfs(void)
{
	if (!proc_mkdir("fs/xfs", NULL))
		return -ENOMEM;

	if (!proc_symlink("fs/xfs/stat", NULL,
			  "/sys/fs/xfs/stats/stats"))
		goto out;

#ifdef CONFIG_XFS_QUOTA
	if (!proc_create_single("fs/xfs/xqmstat", 0, NULL, xqmstat_proc_show))
		goto out;
	if (!proc_create_single("fs/xfs/xqm", 0, NULL, xqm_proc_show))
		goto out;
#endif
	return 0;

out:
	remove_proc_subtree("fs/xfs", NULL);
	return -ENOMEM;
}

void
xfs_cleanup_procfs(void)
{
	remove_proc_subtree("fs/xfs", NULL);
}
#endif	/* CONFIG_PROC_FS */