bcachefs: Write out fs usage

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet 2019-01-24 17:54:51 -05:00 committed by Kent Overstreet
parent 2c5af169f7
commit 3ccc5c50f2
5 changed files with 120 additions and 58 deletions
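
The core of the change, visible in the diff below, is a new helper bch2_journal_super_entries_add_common() that emits the btree roots plus BCH_JSET_ENTRY_usage entries for nr_inodes, the key version counter, and per-replicas-entry sector counts; it is called from both the journal write path and the clean superblock path. As a rough illustration of the size accounting those entries use (the jset_entry header occupies one u64, and entry.u64s counts only the u64s that follow it, hence the "- 1" in the DIV_ROUND_UP() expressions), here is a minimal userspace sketch with stand-in struct layouts, not the kernel's definitions:

/*
 * Minimal sketch of the usage-entry size accounting; struct layouts here are
 * stand-ins, not bcachefs's real jset_entry / jset_entry_usage.
 */
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

struct entry_hdr {			/* stand-in header: exactly one u64 */
	uint16_t u64s;			/* u64s of payload after the header */
	uint8_t  btree_id, level, type, pad[3];
};

struct usage_entry {			/* stand-in for a usage entry */
	struct entry_hdr entry;
	uint64_t sectors;
	uint8_t  type;			/* INODES / KEY_VERSION / REPLICAS */
	uint8_t  r[];			/* variable-length replicas entry */
};

int main(void)
{
	unsigned nr_devs = 3;		/* example replicas entry, 3 devices */

	/* header is 1 u64, so payload u64s = total u64s - 1 */
	unsigned u64s = DIV_ROUND_UP(sizeof(struct usage_entry) + nr_devs,
				     sizeof(uint64_t)) - 1;

	printf("entry.u64s = %u, bytes on disk = %u\n",
	       u64s, (unsigned) ((1 + u64s) * sizeof(uint64_t)));
	return 0;
}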

View File

@@ -863,19 +863,6 @@ err:
/* journal write: */
static void bch2_journal_add_btree_root(struct journal_buf *buf,
enum btree_id id, struct bkey_i *k,
unsigned level)
{
struct jset_entry *entry;
entry = bch2_journal_add_entry_noreservation(buf, k->k.u64s);
entry->type = BCH_JSET_ENTRY_btree_root;
entry->btree_id = id;
entry->level = level;
memcpy_u64s(entry->_data, k, k->k.u64s);
}
static unsigned journal_dev_buckets_available(struct journal *j,
struct journal_device *ja)
{
@@ -1206,25 +1193,27 @@ void bch2_journal_write(struct closure *cl)
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_dev *ca;
struct journal_buf *w = journal_prev_buf(j);
struct jset_entry *start, *end;
struct jset *jset;
struct bio *bio;
struct bch_extent_ptr *ptr;
bool validate_before_checksum = false;
unsigned i, sectors, bytes;
unsigned i, sectors, bytes, u64s;
journal_buf_realloc(j, w);
jset = w->data;
j->write_start_time = local_clock();
mutex_lock(&c->btree_root_lock);
for (i = 0; i < BTREE_ID_NR; i++) {
struct btree_root *r = &c->btree_roots[i];
if (r->alive)
bch2_journal_add_btree_root(w, i, &r->key, r->level);
}
c->btree_roots_dirty = false;
mutex_unlock(&c->btree_root_lock);
start = vstruct_last(w->data);
end = bch2_journal_super_entries_add_common(c, start,
le64_to_cpu(jset->seq));
u64s = (u64 *) end - (u64 *) start;
BUG_ON(u64s > j->entry_u64s_reserved);
le32_add_cpu(&w->data->u64s, u64s);
BUG_ON(vstruct_sectors(jset, c->block_bits) >
w->disk_sectors);
journal_write_compact(jset);
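
In the journal write path above, those write-time entries are appended after the last entry in the buffer and charged against the journal's standing reservation: their size is measured as a count of u64s by subtracting u64 pointers, checked against j->entry_u64s_reserved, and added to the jset's u64s field. A minimal sketch of that accounting with stand-in names, not bcachefs APIs:

/* Stand-in for a journal buffer: `used` u64s filled, `reserved` u64s kept
 * free for entries that are only generated at write time. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

struct buf {
	uint64_t data[64];
	unsigned used;
	unsigned reserved;
};

/* Append n u64s of write-time entries; mirrors the "u64s = end - start" and
 * BUG_ON(u64s > j->entry_u64s_reserved) pattern in the hunk above. */
static void append_reserved(struct buf *b, const uint64_t *src, unsigned n)
{
	assert(n <= b->reserved);		/* reservation must cover it */
	memcpy(&b->data[b->used], src, n * sizeof(uint64_t));
	b->used += n;				/* like le32_add_cpu(&jset->u64s, n) */
}

int main(void)
{
	struct buf b = { .used = 10, .reserved = 8 };
	uint64_t entries[3] = { 1, 2, 3 };

	append_reserved(&b, entries, 3);
	assert(b.used == 13);
	return 0;
}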

View File

@@ -30,11 +30,6 @@ static void replicas_entry_sort(struct bch_replicas_entry *e)
bubble_sort(e->devs, e->nr_devs, u8_cmp);
}
#define for_each_cpu_replicas_entry(_r, _i) \
for (_i = (_r)->entries; \
(void *) (_i) < (void *) (_r)->entries + (_r)->nr * (_r)->entry_size;\
_i = (void *) (_i) + (_r)->entry_size)
static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
{
eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);

View File

@@ -57,6 +57,11 @@ unsigned bch2_dev_has_data(struct bch_fs *, struct bch_dev *);
int bch2_replicas_gc_end(struct bch_fs *, int);
int bch2_replicas_gc_start(struct bch_fs *, unsigned);
#define for_each_cpu_replicas_entry(_r, _i) \
for (_i = (_r)->entries; \
(void *) (_i) < (void *) (_r)->entries + (_r)->nr * (_r)->entry_size;\
_i = (void *) (_i) + (_r)->entry_size)
/* iterate over superblock replicas - used by userspace tools: */
#define replicas_entry_bytes(_i) \

View File

@@ -885,29 +885,112 @@ void bch2_sb_clean_renumber(struct bch_sb_field_clean *clean, int write)
bch2_bkey_renumber(BKEY_TYPE_BTREE, bkey_to_packed(entry->start), write);
}
void bch2_fs_mark_clean(struct bch_fs *c, bool clean)
static void bch2_fs_mark_dirty(struct bch_fs *c)
{
struct bch_sb_field_clean *sb_clean;
unsigned u64s = sizeof(*sb_clean) / sizeof(u64);
struct jset_entry *entry;
struct btree_root *r;
mutex_lock(&c->sb_lock);
if (clean == BCH_SB_CLEAN(c->disk_sb.sb))
goto out;
if (BCH_SB_CLEAN(c->disk_sb.sb)) {
SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
bch2_write_super(c);
}
mutex_unlock(&c->sb_lock);
}
SET_BCH_SB_CLEAN(c->disk_sb.sb, clean);
if (!clean)
goto write_super;
struct jset_entry *
bch2_journal_super_entries_add_common(struct bch_fs *c,
struct jset_entry *entry,
u64 journal_seq)
{
struct jset_entry_usage *u;
struct btree_root *r;
unsigned i;
mutex_lock(&c->btree_root_lock);
for (r = c->btree_roots;
r < c->btree_roots + BTREE_ID_NR;
r++)
if (r->alive)
u64s += jset_u64s(r->key.u64s);
if (r->alive) {
entry->u64s = r->key.u64s;
entry->btree_id = r - c->btree_roots;
entry->level = r->level;
entry->type = BCH_JSET_ENTRY_btree_root;
bkey_copy(&entry->start[0], &r->key);
entry = vstruct_next(entry);
}
c->btree_roots_dirty = false;
mutex_unlock(&c->btree_root_lock);
if (journal_seq)
return entry;
percpu_down_write(&c->mark_lock);
{
u64 nr_inodes = percpu_u64_get(&c->usage[0]->s.nr_inodes);
u = container_of(entry, struct jset_entry_usage, entry);
memset(u, 0, sizeof(*u));
u->entry.u64s = DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
u->entry.type = BCH_JSET_ENTRY_usage;
u->sectors = cpu_to_le64(nr_inodes);
u->type = FS_USAGE_INODES;
entry = vstruct_next(entry);
}
{
u = container_of(entry, struct jset_entry_usage, entry);
memset(u, 0, sizeof(*u));
u->entry.u64s = DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
u->entry.type = BCH_JSET_ENTRY_usage;
u->sectors = cpu_to_le64(atomic64_read(&c->key_version));
u->type = FS_USAGE_KEY_VERSION;
entry = vstruct_next(entry);
}
for (i = 0; i < c->replicas.nr; i++) {
struct bch_replicas_entry *e =
cpu_replicas_entry(&c->replicas, i);
u64 sectors = percpu_u64_get(&c->usage[0]->data[i]);
u = container_of(entry, struct jset_entry_usage, entry);
u->entry.u64s = DIV_ROUND_UP(sizeof(*u) + e->nr_devs,
sizeof(u64)) - 1;
u->entry.type = BCH_JSET_ENTRY_usage;
u->sectors = cpu_to_le64(sectors);
u->type = FS_USAGE_REPLICAS;
unsafe_memcpy(&u->r, e, replicas_entry_bytes(e),
"embedded variable length struct");
entry = vstruct_next(entry);
}
percpu_up_write(&c->mark_lock);
return entry;
}
void bch2_fs_mark_clean(struct bch_fs *c, bool clean)
{
struct bch_sb_field_clean *sb_clean;
struct jset_entry *entry;
unsigned u64s;
if (!clean) {
bch2_fs_mark_dirty(c);
return;
}
mutex_lock(&c->sb_lock);
if (BCH_SB_CLEAN(c->disk_sb.sb))
goto out;
SET_BCH_SB_CLEAN(c->disk_sb.sb, true);
u64s = sizeof(*sb_clean) / sizeof(u64) + c->journal.entry_u64s_reserved;
sb_clean = bch2_sb_resize_clean(&c->disk_sb, u64s);
if (!sb_clean) {
@@ -921,30 +1004,16 @@ void bch2_fs_mark_clean(struct bch_fs *c, bool clean)
sb_clean->journal_seq = journal_cur_seq(&c->journal) - 1;
entry = sb_clean->start;
entry = bch2_journal_super_entries_add_common(c, entry, 0);
BUG_ON((void *) entry > vstruct_end(&sb_clean->field));
memset(entry, 0,
vstruct_end(&sb_clean->field) - (void *) entry);
for (r = c->btree_roots;
r < c->btree_roots + BTREE_ID_NR;
r++)
if (r->alive) {
entry->u64s = r->key.u64s;
entry->btree_id = r - c->btree_roots;
entry->level = r->level;
entry->type = BCH_JSET_ENTRY_btree_root;
bkey_copy(&entry->start[0], &r->key);
entry = vstruct_next(entry);
BUG_ON((void *) entry > vstruct_end(&sb_clean->field));
}
BUG_ON(entry != vstruct_end(&sb_clean->field));
if (le16_to_cpu(c->disk_sb.sb->version) <
bcachefs_metadata_version_bkey_renumber)
bch2_sb_clean_renumber(sb_clean, WRITE);
mutex_unlock(&c->btree_root_lock);
write_super:
bch2_write_super(c);
out:
mutex_unlock(&c->sb_lock);
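
On the clean-shutdown side, the clean section is now sized for the worst case (its own header plus c->journal.entry_u64s_reserved u64s), filled by the same common helper, and the unused space before vstruct_end() is zeroed so the entry stream carries no stale tail. A minimal sketch of that reserve, fill, zero-the-rest pattern, with stand-in types rather than bcachefs's:

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct cursor {
	uint64_t *pos;		/* next free u64 in the reserved buffer */
	uint64_t *end;		/* end of the reservation */
};

/* stand-in for the common entry writer: appends entries, returns new pos */
static uint64_t *write_entries(struct cursor *c)
{
	assert(c->pos + 3 <= c->end);	/* reservation covers the worst case */
	*c->pos++ = 1;
	*c->pos++ = 2;
	*c->pos++ = 3;
	return c->pos;
}

int main(void)
{
	uint64_t buf[8];
	struct cursor c = { buf, buf + 8 };
	uint64_t *end;

	memset(buf, 0xaa, sizeof(buf));	/* stale contents from a prior write */
	end = write_entries(&c);

	/* zero the unused tail, as with memset(entry, 0, vstruct_end(...) - entry) */
	memset(end, 0, (char *) (buf + 8) - (char *) end);

	assert(buf[3] == 0 && buf[7] == 0);
	return 0;
}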

View File

@@ -135,6 +135,10 @@ static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
/* BCH_SB_FIELD_clean: */
struct jset_entry *
bch2_journal_super_entries_add_common(struct bch_fs *,
struct jset_entry *, u64);
void bch2_sb_clean_renumber(struct bch_sb_field_clean *, int);
void bch2_fs_mark_clean(struct bch_fs *, bool);