bcachefs: dev_usage updated by new accounting
Reading disk accounting now requires an eytzinger lookup (see bch2_accounting_mem_read()), but the per-device counters are used frequently enough that we'd still like to be able to read them with just a percpu sum, as in the old code.

This patch special-cases the device counters: when we update in-memory accounting, we also update the old-style percpu counters if it's a device counter update.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
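For context: the eytzinger lookup mentioned above is a binary search over keys stored in BFS ("eytzinger") order. The helper below is an illustrative, 1-indexed sketch, not the implementation in bcachefs's eytzinger.h; it only shows why a general accounting read is a log(n) walk through the index, which is what makes a flat percpu sum attractive for the hot per-device counters.

/* Illustrative only -- not bcachefs's eytzinger.h.  Lower-bound search over
 * tree[1..nr] stored in BFS (eytzinger) layout: node i has children 2*i and
 * 2*i + 1, and an in-order walk visits the keys in sorted order. */
#include <linux/types.h>

static unsigned eytzinger1_lower_bound(const u64 *tree, unsigned nr, u64 search)
{
        unsigned i = 1, best = 0;       /* 0 means "no key >= search" */

        while (i <= nr) {
                if (tree[i] >= search) {
                        best = i;       /* candidate; smaller candidates are in the left subtree */
                        i = 2 * i;
                } else {
                        i = 2 * i + 1;  /* everything here is too small; go right */
                }
        }
        return best;
}

The per-device counters special-cased by this patch skip that walk entirely; see the sketch after the commit metadata below.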
commit f5095b9f85
parent 2e8d686a4a
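Before the diff itself, here is a minimal standalone sketch of the special case, assuming simplified stand-in types and names (dev_usage, dev_usage_mod, dev_usage_read -- these are not bcachefs identifiers). The pattern mirrors what the patch does with ca->usage: a device-counter update is also added into plain percpu counters with this_cpu_add(), and a read just zeroes the result and sums every CPU's copy, with no seqcount and no separate base copy.

/* Sketch only -- simplified stand-ins for the bcachefs types, assuming a
 * Linux kernel build environment.  Shows the update/read pattern, not the
 * real API. */
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/types.h>

struct dev_usage_type {
        u64 buckets;
        u64 sectors;
        u64 fragmented;
};

struct dev_usage {
        struct dev_usage_type d[4];             /* hypothetical: one slot per data type */
};

struct dev {
        struct dev_usage __percpu *usage;       /* single percpu copy, as in the new struct bch_dev */
};

/* Update path: mirror the accounting delta into the device's percpu counters. */
static void dev_usage_mod(struct dev *d, unsigned data_type, const s64 v[3])
{
        this_cpu_add(d->usage->d[data_type].buckets, v[0]);
        this_cpu_add(d->usage->d[data_type].sectors, v[1]);
        this_cpu_add(d->usage->d[data_type].fragmented, v[2]);
}

/* Read path: no eytzinger lookup, no seqcount -- just sum the percpu copies. */
static void dev_usage_read(struct dev *d, struct dev_usage *ret)
{
        unsigned cpu, i;

        memset(ret, 0, sizeof(*ret));

        for_each_possible_cpu(cpu) {
                struct dev_usage *p = per_cpu_ptr(d->usage, cpu);

                for (i = 0; i < ARRAY_SIZE(ret->d); i++) {
                        ret->d[i].buckets    += p->d[i].buckets;
                        ret->d[i].sectors    += p->d[i].sectors;
                        ret->d[i].fragmented += p->d[i].fragmented;
                }
        }
}

In the patch, the update side is the new BCH_DISK_ACCOUNTING_dev_data_type cases in bch2_accounting_mem_mod() and bch2_accounting_read(), and the read side is the simplified bch2_dev_usage_read_fast().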
@@ -545,8 +545,7 @@ struct bch_dev {
         unsigned long                   *buckets_nouse;
         struct rw_semaphore             bucket_lock;
 
-        struct bch_dev_usage            *usage_base;
-        struct bch_dev_usage __percpu   *usage[JOURNAL_BUF_NR];
+        struct bch_dev_usage __percpu   *usage;
         struct bch_dev_usage __percpu   *usage_gc;
 
         /* Allocator: */

@@ -773,7 +773,7 @@ static int bch2_gc_done(struct bch_fs *c)
                 bch2_fs_usage_acc_to_base(c, i);
 
         __for_each_member_device(c, ca) {
-                struct bch_dev_usage *dst = ca->usage_base;
+                struct bch_dev_usage *dst = this_cpu_ptr(ca->usage);
                 struct bch_dev_usage *src = (void *)
                         bch2_acc_percpu_u64s((u64 __percpu *) ca->usage_gc,
                                              dev_usage_u64s());

@@ -69,15 +69,8 @@ void bch2_fs_usage_initialize(struct bch_fs *c)
 
 void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
 {
-        struct bch_fs *c = ca->fs;
-        unsigned seq, i, u64s = dev_usage_u64s();
-
-        do {
-                seq = read_seqcount_begin(&c->usage_lock);
-                memcpy(usage, ca->usage_base, u64s * sizeof(u64));
-                for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
-                        acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage[i], u64s);
-        } while (read_seqcount_retry(&c->usage_lock, seq));
+        memset(usage, 0, sizeof(*usage));
+        acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage, dev_usage_u64s());
 }
 
 u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)

@@ -147,16 +140,6 @@ void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
                         (u64 __percpu *) c->usage[idx], u64s);
         percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
 
-        rcu_read_lock();
-        for_each_member_device_rcu(c, ca, NULL) {
-                u64s = dev_usage_u64s();
-
-                acc_u64s_percpu((u64 *) ca->usage_base,
-                                (u64 __percpu *) ca->usage[idx], u64s);
-                percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64));
-        }
-        rcu_read_unlock();
-
         write_seqcount_end(&c->usage_lock);
         preempt_enable();
 }

@@ -1488,23 +1471,14 @@ void bch2_dev_buckets_free(struct bch_dev *ca)
 {
         kvfree(ca->buckets_nouse);
         kvfree(rcu_dereference_protected(ca->bucket_gens, 1));
-
-        for (unsigned i = 0; i < ARRAY_SIZE(ca->usage); i++)
-                free_percpu(ca->usage[i]);
-        kfree(ca->usage_base);
+        free_percpu(ca->usage);
 }
 
 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
 {
-        ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
-        if (!ca->usage_base)
+        ca->usage = alloc_percpu(struct bch_dev_usage);
+        if (!ca->usage)
                 return -BCH_ERR_ENOMEM_usage_init;
 
-        for (unsigned i = 0; i < ARRAY_SIZE(ca->usage); i++) {
-                ca->usage[i] = alloc_percpu(struct bch_dev_usage);
-                if (!ca->usage[i])
-                        return -BCH_ERR_ENOMEM_usage_init;
-        }
-
         return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
 }

@@ -36,7 +36,7 @@ struct bucket_gens {
 };
 
 struct bch_dev_usage {
-        struct {
+        struct bch_dev_usage_type {
                 u64             buckets;
                 u64             sectors; /* _compressed_ sectors: */
                 /*

@@ -400,6 +400,21 @@ int bch2_accounting_read(struct bch_fs *c)
                 case BCH_DISK_ACCOUNTING_replicas:
                         fs_usage_data_type_to_base(usage, k.replicas.data_type, v[0]);
                         break;
+                case BCH_DISK_ACCOUNTING_dev_data_type:
+                        rcu_read_lock();
+                        struct bch_dev *ca = bch2_dev_rcu(c, k.dev_data_type.dev);
+                        if (ca) {
+                                struct bch_dev_usage_type __percpu *d = &ca->usage->d[k.dev_data_type.data_type];
+                                percpu_u64_set(&d->buckets, v[0]);
+                                percpu_u64_set(&d->sectors, v[1]);
+                                percpu_u64_set(&d->fragmented, v[2]);
+
+                                if (k.dev_data_type.data_type == BCH_DATA_sb ||
+                                    k.dev_data_type.data_type == BCH_DATA_journal)
+                                        usage->hidden += v[0] * ca->mi.bucket_size;
+                        }
+                        rcu_read_unlock();
+                        break;
                 }
         }
         preempt_enable();

@@ -3,6 +3,7 @@
 #define _BCACHEFS_DISK_ACCOUNTING_H
 
 #include "eytzinger.h"
+#include "sb-members.h"
 
 static inline void bch2_u64s_neg(u64 *v, unsigned nr)
 {

@@ -131,6 +132,7 @@ static inline int __bch2_accounting_mem_mod(struct bch_fs *c, struct bkey_s_c_accounting a)
 static inline int bch2_accounting_mem_mod(struct btree_trans *trans, struct bkey_s_c_accounting a)
 {
+        struct bch_fs *c = trans->c;
         struct disk_accounting_pos acc_k;
         bpos_to_disk_accounting_pos(&acc_k, a.k->p);
 

@@ -141,8 +143,18 @@ static inline int bch2_accounting_mem_mod(struct btree_trans *trans, struct bkey_s_c_accounting a)
         case BCH_DISK_ACCOUNTING_replicas:
                 fs_usage_data_type_to_base(&trans->fs_usage_delta, acc_k.replicas.data_type, a.v->d[0]);
                 break;
+        case BCH_DISK_ACCOUNTING_dev_data_type:
+                rcu_read_lock();
+                struct bch_dev *ca = bch2_dev_rcu(c, acc_k.dev_data_type.dev);
+                if (ca) {
+                        this_cpu_add(ca->usage->d[acc_k.dev_data_type.data_type].buckets, a.v->d[0]);
+                        this_cpu_add(ca->usage->d[acc_k.dev_data_type.data_type].sectors, a.v->d[1]);
+                        this_cpu_add(ca->usage->d[acc_k.dev_data_type.data_type].fragmented, a.v->d[2]);
+                }
+                rcu_read_unlock();
+                break;
         }
-        return __bch2_accounting_mem_mod(trans->c, a);
+        return __bch2_accounting_mem_mod(c, a);
 }
 
 static inline void bch2_accounting_mem_read_counters(struct bch_fs *c,

@@ -451,23 +451,6 @@ static int journal_replay_entry_early(struct bch_fs *c,
                                               le64_to_cpu(u->v));
                 break;
         }
-        case BCH_JSET_ENTRY_dev_usage: {
-                struct jset_entry_dev_usage *u =
-                        container_of(entry, struct jset_entry_dev_usage, entry);
-                unsigned nr_types = jset_entry_dev_usage_nr_types(u);
-
-                rcu_read_lock();
-                struct bch_dev *ca = bch2_dev_rcu(c, le32_to_cpu(u->dev));
-                if (ca)
-                        for (unsigned i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
-                                ca->usage_base->d[i].buckets = le64_to_cpu(u->d[i].buckets);
-                                ca->usage_base->d[i].sectors = le64_to_cpu(u->d[i].sectors);
-                                ca->usage_base->d[i].fragmented = le64_to_cpu(u->d[i].fragmented);
-                        }
-                rcu_read_unlock();
-
-                break;
-        }
         case BCH_JSET_ENTRY_blacklist: {
                 struct jset_entry_blacklist *bl_entry =
                         container_of(entry, struct jset_entry_blacklist, entry);

@@ -236,23 +236,6 @@ void bch2_journal_super_entries_add_common(struct bch_fs *c,
                                   "embedded variable length struct");
         }
 
-        for_each_member_device(c, ca) {
-                unsigned b = sizeof(struct jset_entry_dev_usage) +
-                        sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR;
-                struct jset_entry_dev_usage *u =
-                        container_of(jset_entry_init(end, b),
-                                     struct jset_entry_dev_usage, entry);
-
-                u->entry.type = BCH_JSET_ENTRY_dev_usage;
-                u->dev = cpu_to_le32(ca->dev_idx);
-
-                for (unsigned i = 0; i < BCH_DATA_NR; i++) {
-                        u->d[i].buckets = cpu_to_le64(ca->usage_base->d[i].buckets);
-                        u->d[i].sectors = cpu_to_le64(ca->usage_base->d[i].sectors);
-                        u->d[i].fragmented = cpu_to_le64(ca->usage_base->d[i].fragmented);
-                }
-        }
-
         percpu_up_read(&c->mark_lock);
 
         for (unsigned i = 0; i < 2; i++) {