bcachefs: Options for recovery_passes, recovery_passes_exclude

This adds mount options for specifying recovery passes to run or to
exclude; the immediate need is that the backpointers fsck pass is having
trouble completing, so we need a way to skip it.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet, 2024-09-01 22:39:42 -04:00
commit 4f19a60c32, parent ff7f756f2b
8 changed files with 33 additions and 20 deletions
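
A hypothetical usage sketch (device and mount point are placeholders; the pass name is just one entry from bch2_recovery_passes[]):

	mount -t bcachefs -o recovery_passes_exclude=check_extents_to_backpointers /dev/sdX /mnt

Since mount options are themselves comma separated, listing several passes in one of these options presumably relies on the ';' separator added to bch2_read_flag_list() below, e.g. recovery_passes=check_allocations;check_lrus.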


@@ -1045,8 +1045,6 @@ struct bch_fs {
 	 * for signaling to the toplevel code which pass we want to run now.
 	 */
 	enum bch_recovery_pass	curr_recovery_pass;
-	/* bitmap of explicitly enabled recovery passes: */
-	u64			recovery_passes_explicit;
 	/* bitmask of recovery passes that we actually ran */
 	u64			recovery_passes_complete;
 	/* never rewinds version of curr_recovery_pass */


@@ -1666,7 +1666,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
 		bch2_btree_pos_to_text(&buf, c, b);
 		bch_err_ratelimited(c, "%s", buf.buf);
-		if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
+		if (c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
 		    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
 			bch2_fatal_error(c);


@@ -146,7 +146,7 @@ fsck_err:
 	printbuf_exit(&buf);
 	return ret;
 topology_repair:
-	if ((c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology)) &&
+	if ((c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_check_topology)) &&
 	    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology) {
 		bch2_inconsistent_error(c);
 		ret = -BCH_ERR_btree_need_topology_repair;


@@ -432,6 +432,9 @@ void bch2_opt_to_text(struct printbuf *out,
 		else
 			prt_str(out, opt->choices[v]);
 		break;
+	case BCH_OPT_BITFIELD:
+		prt_bitflags(out, opt->choices, v);
+		break;
 	case BCH_OPT_FN:
 		opt->fn.to_text(out, c, sb, v);
 		break;
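
For context, prt_bitflags() prints the names of the set bits from the option's choices[] table. A minimal userspace analog of that behaviour (a sketch, not the kernel printbuf code; names here are made up):

	#include <stdio.h>
	#include <stdint.h>

	/*
	 * Sketch of what a bitflags-to-text helper does: print the name of
	 * each set bit, comma separated, from a NULL-terminated name table.
	 */
	static void print_bitflags(const char * const names[], uint64_t v)
	{
		const char *sep = "";

		for (unsigned i = 0; names[i]; i++)
			if (v & (1ULL << i)) {
				printf("%s%s", sep, names[i]);
				sep = ",";
			}
	}

Given names = { "alpha", "beta", NULL } and v = 0x3, this prints "alpha,beta".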


@@ -373,6 +373,16 @@ enum fsck_err_opts {
 	  OPT_BOOL(),							\
 	  BCH2_NO_SB_OPT,		false,				\
 	  NULL,		"Exit recovery immediately prior to journal replay")\
+	x(recovery_passes,		u64,				\
+	  OPT_FS|OPT_MOUNT,						\
+	  OPT_BITFIELD(bch2_recovery_passes),				\
+	  BCH2_NO_SB_OPT,		0,				\
+	  NULL,		"Recovery passes to run explicitly")		\
+	x(recovery_passes_exclude,	u64,				\
+	  OPT_FS|OPT_MOUNT,						\
+	  OPT_BITFIELD(bch2_recovery_passes),				\
+	  BCH2_NO_SB_OPT,		0,				\
+	  NULL,		"Recovery passes to exclude")			\
 	x(recovery_pass_last,		u8,				\
 	  OPT_FS|OPT_MOUNT,						\
 	  OPT_STR_NOLIMIT(bch2_recovery_passes),			\
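
Rough sketch of what these entries amount to: the x() lines are expanded by the BCH_OPTS() macro machinery into members of struct bch_opts plus parse/print metadata, so both new options end up as u64 bitmasks whose bit positions follow enum bch_recovery_pass and whose names resolve against bch2_recovery_passes[]. Illustrative shape only, not the generated code:

	#include <stdint.h>

	/* Illustration only -- the real members are generated into struct bch_opts. */
	struct bch_opts_sketch {
		uint64_t	recovery_passes;		/* passes forced to run */
		uint64_t	recovery_passes_exclude;	/* passes skipped */
	};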


@@ -97,7 +97,7 @@ static void bch2_reconstruct_alloc(struct bch_fs *c)
 	bch2_write_super(c);
 	mutex_unlock(&c->sb_lock);
-	c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
+	c->opts.recovery_passes |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
 	bch2_shoot_down_journal_keys(c, BTREE_ID_alloc,
@@ -525,17 +525,17 @@ static int read_btree_roots(struct bch_fs *c)
 				"error reading btree root %s l=%u: %s",
 				bch2_btree_id_str(i), r->level, bch2_err_str(ret))) {
 			if (btree_id_is_alloc(i)) {
-				c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_allocations);
-				c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_info);
-				c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_lrus);
-				c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers);
-				c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_to_lru_refs);
+				c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_allocations);
+				c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_info);
+				c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_lrus);
+				c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers);
+				c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_to_lru_refs);
 				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
 				r->error = 0;
-			} else if (!(c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes))) {
+			} else if (!(c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes))) {
 				bch_info(c, "will run btree node scan");
-				c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes);
-				c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
+				c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes);
+				c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
 			}
 			ret = 0;
@@ -706,14 +706,14 @@ int bch2_fs_recovery(struct bch_fs *c)
 		if (check_version_upgrade(c))
			write_sb = true;
-		c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
+		c->opts.recovery_passes |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
 		if (write_sb)
 			bch2_write_super(c);
 		mutex_unlock(&c->sb_lock);
 	if (c->opts.fsck && IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
-		c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
+		c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
 	if (c->opts.fsck)
 		set_bit(BCH_FS_fsck_running, &c->flags);


@@ -40,7 +40,7 @@ static int bch2_set_may_go_rw(struct bch_fs *c)
 	set_bit(BCH_FS_may_go_rw, &c->flags);
-	if (keys->nr || c->opts.fsck || !c->sb.clean || c->recovery_passes_explicit)
+	if (keys->nr || c->opts.fsck || !c->sb.clean || c->opts.recovery_passes)
 		return bch2_fs_read_write_early(c);
 	return 0;
 }
@@ -97,14 +97,14 @@ u64 bch2_recovery_passes_from_stable(u64 v)
 int bch2_run_explicit_recovery_pass(struct bch_fs *c,
 				    enum bch_recovery_pass pass)
 {
-	if (c->recovery_passes_explicit & BIT_ULL(pass))
+	if (c->opts.recovery_passes & BIT_ULL(pass))
 		return 0;
 	bch_info(c, "running explicit recovery pass %s (%u), currently at %s (%u)",
 		 bch2_recovery_passes[pass], pass,
 		 bch2_recovery_passes[c->curr_recovery_pass], c->curr_recovery_pass);
-	c->recovery_passes_explicit |= BIT_ULL(pass);
+	c->opts.recovery_passes |= BIT_ULL(pass);
 	if (c->curr_recovery_pass >= pass) {
 		c->curr_recovery_pass = pass;
@@ -161,7 +161,9 @@ static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pa
 {
 	struct recovery_pass_fn *p = recovery_pass_fns + pass;
-	if (c->recovery_passes_explicit & BIT_ULL(pass))
+	if (c->opts.recovery_passes_exclude & BIT_ULL(pass))
+		return false;
+	if (c->opts.recovery_passes & BIT_ULL(pass))
 		return true;
 	if ((p->when & PASS_FSCK) && c->opts.fsck)
 		return true;
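
The resulting precedence in should_run_recovery_pass(): an excluded pass never runs, an explicitly requested pass always runs, and otherwise the pass's default policy (e.g. the PASS_FSCK check when -o fsck is set) decides. A simplified standalone sketch of that ordering, not the kernel function:

	#include <stdbool.h>
	#include <stdint.h>

	/* Exclusion wins, then explicit selection, then the default policy. */
	static bool should_run_pass_sketch(uint64_t run, uint64_t exclude,
					   unsigned pass, bool default_wants_it)
	{
		if (exclude & (1ULL << pass))
			return false;
		if (run & (1ULL << pass))
			return true;
		return default_wants_it;	/* e.g. the PASS_FSCK check */
	}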


@@ -214,7 +214,7 @@ u64 bch2_read_flag_list(const char *opt, const char * const list[])
 	s = strim(d);
-	while ((p = strsep(&s, ","))) {
+	while ((p = strsep(&s, ",;"))) {
 		int flag = match_string(list, -1, p);
 		if (flag < 0) {
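
The separator list gains ';' presumably so that one mount option can carry multiple pass names even though ',' already separates mount options from each other. A standalone userspace analog of the parse (strim()/match_string()/kstrdup() replaced with libc equivalents; a sketch, not bch2_read_flag_list() itself):

	#define _DEFAULT_SOURCE		/* for strsep() */
	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	/*
	 * Split on ',' or ';', look each token up in a NULL-terminated name
	 * table, and OR the matching bit into the result; UINT64_MAX means an
	 * unknown name was seen.
	 */
	static uint64_t read_flag_list_sketch(const char *opt, const char * const list[])
	{
		uint64_t ret = 0;
		char *d = strdup(opt), *s = d, *p;

		if (!d)
			return UINT64_MAX;

		while ((p = strsep(&s, ",;"))) {
			int flag = -1;

			for (int i = 0; list[i]; i++)
				if (!strcmp(p, list[i])) {
					flag = i;
					break;
				}

			if (flag < 0) {
				ret = UINT64_MAX;
				break;
			}

			ret |= 1ULL << flag;
		}

		free(d);
		return ret;
	}

With a name table like bch2_recovery_passes[], read_flag_list_sketch("check_allocations;check_lrus", table) would set the bits for both passes.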