bcachefs: journal seq blacklist gc no longer has to walk btree

Since btree_ptr_v2, we no longer require the journal seq blacklist table
for skipping blacklisted bsets (btree node entries); the pointer to a
given node indicates how much data is present.

Therefore there's no longer any need for journal seq blacklist gc to
walk the btree - we can prune entries older than journal last_seq.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet 2024-04-20 22:19:48 -04:00
parent e7f63c67fc
commit f04158290d
7 changed files with 26 additions and 66 deletions
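
To make the new pruning rule concrete before the diffs: a blacklist entry can be dropped once every journal entry still present on disk is newer than the range it blacklists, so gc keeps an entry only if it is dirty or its end is at or past the oldest journal sequence found on disk. The following is a minimal standalone sketch of that rule, not the bcachefs code itself; the struct and function names are simplified stand-ins.

/* Standalone illustration of the pruning criterion (simplified types,
 * not the kernel API): keep an entry if it is dirty or still overlaps
 * sequence numbers at or past the oldest journal entry found on disk. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct blacklist_entry {
	uint64_t	start;
	uint64_t	end;
	bool		dirty;
};

/* Compact the table in place; returns the new number of entries. */
static unsigned blacklist_gc(struct blacklist_entry *e, unsigned nr,
			     uint64_t oldest_seq_found_ondisk)
{
	unsigned dst = 0;

	for (unsigned src = 0; src < nr; src++)
		if (e[src].dirty || e[src].end >= oldest_seq_found_ondisk)
			e[dst++] = e[src];

	return dst;
}

int main(void)
{
	struct blacklist_entry table[] = {
		{ .start = 10, .end = 20,  .dirty = false }, /* entirely older than anything on disk: pruned */
		{ .start = 90, .end = 120, .dirty = false }, /* still overlaps the on-disk range: kept */
		{ .start = 5,  .end = 8,   .dirty = true  }, /* dirty: always kept */
	};
	unsigned nr = sizeof(table) / sizeof(table[0]);

	unsigned new_nr = blacklist_gc(table, nr, 100);
	printf("nr blacklist entries was %u, now %u\n", nr, new_nr);
	return 0;
}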

@@ -930,7 +930,6 @@ struct bch_fs {
 	/* JOURNAL SEQ BLACKLIST */
 	struct journal_seq_blacklist_table *
 				journal_seq_blacklist_table;
-	struct work_struct	journal_seq_blacklist_gc_work;
 
 	/* ALLOCATOR */
 	spinlock_t		freelist_lock;

@@ -122,6 +122,10 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
 	struct printbuf buf = PRINTBUF;
 	int ret = JOURNAL_ENTRY_ADD_OK;
 
+	if (!c->journal.oldest_seq_found_ondisk ||
+	    le64_to_cpu(j->seq) < c->journal.oldest_seq_found_ondisk)
+		c->journal.oldest_seq_found_ondisk = le64_to_cpu(j->seq);
+
 	/* Is this entry older than the range we need? */
 	if (!c->opts.read_entire_journal &&
 	    le64_to_cpu(j->seq) < jlist->last_seq)

@@ -1,8 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 
 #include "bcachefs.h"
-#include "btree_iter.h"
 #include "eytzinger.h"
+#include "journal.h"
 #include "journal_seq_blacklist.h"
 #include "super-io.h"
@@ -217,78 +217,40 @@ const struct bch_sb_field_ops bch_sb_field_ops_journal_seq_blacklist = {
 	.to_text	= bch2_sb_journal_seq_blacklist_to_text
 };
 
-void bch2_blacklist_entries_gc(struct work_struct *work)
+bool bch2_blacklist_entries_gc(struct bch_fs *c)
 {
-	struct bch_fs *c = container_of(work, struct bch_fs,
-					journal_seq_blacklist_gc_work);
-	struct journal_seq_blacklist_table *t;
-	struct bch_sb_field_journal_seq_blacklist *bl;
 	struct journal_seq_blacklist_entry *src, *dst;
-	struct btree_trans *trans = bch2_trans_get(c);
-	unsigned i, nr, new_nr;
-	int ret;
-
-	for (i = 0; i < BTREE_ID_NR; i++) {
-		struct btree_iter iter;
-		struct btree *b;
-
-		bch2_trans_node_iter_init(trans, &iter, i, POS_MIN,
-					  0, 0, BTREE_ITER_prefetch);
-retry:
-		bch2_trans_begin(trans);
-		b = bch2_btree_iter_peek_node(&iter);
-
-		while (!(ret = PTR_ERR_OR_ZERO(b)) &&
-		       b &&
-		       !test_bit(BCH_FS_stopping, &c->flags))
-			b = bch2_btree_iter_next_node(&iter);
-
-		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
-			goto retry;
-
-		bch2_trans_iter_exit(trans, &iter);
-	}
-
-	bch2_trans_put(trans);
-	if (ret)
-		return;
-
-	mutex_lock(&c->sb_lock);
-	bl = bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
+	struct bch_sb_field_journal_seq_blacklist *bl =
+		bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
 	if (!bl)
-		goto out;
+		return false;
 
-	nr = blacklist_nr_entries(bl);
+	unsigned nr = blacklist_nr_entries(bl);
 	dst = bl->start;
 
-	t = c->journal_seq_blacklist_table;
+	struct journal_seq_blacklist_table *t = c->journal_seq_blacklist_table;
 	BUG_ON(nr != t->nr);
 
+	unsigned i;
 	for (src = bl->start, i = eytzinger0_first(t->nr);
 	     src < bl->start + nr;
 	     src++, i = eytzinger0_next(i, nr)) {
 		BUG_ON(t->entries[i].start	!= le64_to_cpu(src->start));
 		BUG_ON(t->entries[i].end	!= le64_to_cpu(src->end));
 
-		if (t->entries[i].dirty)
+		if (t->entries[i].dirty || t->entries[i].end >= c->journal.oldest_seq_found_ondisk)
 			*dst++ = *src;
 	}
 
-	new_nr = dst - bl->start;
+	unsigned new_nr = dst - bl->start;
+	if (new_nr == nr)
+		return false;
 
-	bch_info(c, "nr blacklist entries was %u, now %u", nr, new_nr);
+	bch_verbose(c, "nr blacklist entries was %u, now %u", nr, new_nr);
 
-	if (new_nr != nr) {
-		bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
-					  new_nr ? sb_blacklist_u64s(new_nr) : 0);
-		BUG_ON(new_nr && !bl);
+	bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
+				  new_nr ? sb_blacklist_u64s(new_nr) : 0);
+	BUG_ON(new_nr && !bl);
 
-		if (!new_nr)
-			c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_journal_seq_blacklist_v3));
-
-		bch2_write_super(c);
-	}
-out:
-	mutex_unlock(&c->sb_lock);
+	if (!new_nr)
+		c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_journal_seq_blacklist_v3));
+	return true;
 }

@@ -17,6 +17,6 @@ int bch2_blacklist_table_initialize(struct bch_fs *);
 
 extern const struct bch_sb_field_ops bch_sb_field_ops_journal_seq_blacklist;
 
-void bch2_blacklist_entries_gc(struct work_struct *);
+bool bch2_blacklist_entries_gc(struct bch_fs *);
 
 #endif /* _BCACHEFS_JOURNAL_SEQ_BLACKLIST_H */

@@ -229,6 +229,7 @@ struct journal {
 	u64			last_seq_ondisk;
 	u64			err_seq;
 	u64			last_empty_seq;
+	u64			oldest_seq_found_ondisk;
 
 	/*
	 * FIFO of journal entries whose btree updates have not yet been

@@ -878,6 +878,9 @@ use_clean:
 		write_sb = true;
 	}
 
+	if (bch2_blacklist_entries_gc(c))
+		write_sb = true;
+
 	if (write_sb)
 		bch2_write_super(c);
 	mutex_unlock(&c->sb_lock);
@@ -900,10 +903,6 @@ use_clean:
 		bch_info(c, "scanning for old btree nodes done");
 	}
 
-	if (c->journal_seq_blacklist_table &&
-	    c->journal_seq_blacklist_table->nr > 128)
-		queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);
-
 	ret = 0;
out:
 	bch2_flush_fsck_errs(c);

@@ -611,8 +611,6 @@ void __bch2_fs_stop(struct bch_fs *c)
 
 	set_bit(BCH_FS_stopping, &c->flags);
 
-	cancel_work_sync(&c->journal_seq_blacklist_gc_work);
-
 	down_write(&c->state_lock);
 	bch2_fs_read_only(c);
 	up_write(&c->state_lock);
@@ -796,9 +794,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 
 	spin_lock_init(&c->btree_write_error_lock);
 
-	INIT_WORK(&c->journal_seq_blacklist_gc_work,
-		  bch2_blacklist_entries_gc);
-
 	INIT_LIST_HEAD(&c->journal_iters);
 
 	INIT_LIST_HEAD(&c->fsck_error_msgs);