bcachefs: Convert for_each_btree_node() to lockrestart_do()

for_each_btree_node() now works similarly to for_each_btree_key(), where
the loop body is passed as an argument to lockrestart_do().

This now calls trans_begin() on every loop iteration - which fixes an
SRCU warning in backpointers fsck.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
Kent Overstreet 2024-08-07 16:34:28 -04:00
parent 48d6cc1b48
commit 968feb854a
4 changed files with 42 additions and 54 deletions

View File

@ -763,27 +763,22 @@ static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
btree < BTREE_ID_NR && !ret; btree < BTREE_ID_NR && !ret;
btree++) { btree++) {
unsigned depth = (BIT_ULL(btree) & btree_leaf_mask) ? 0 : 1; unsigned depth = (BIT_ULL(btree) & btree_leaf_mask) ? 0 : 1;
struct btree_iter iter;
struct btree *b;
if (!(BIT_ULL(btree) & btree_leaf_mask) && if (!(BIT_ULL(btree) & btree_leaf_mask) &&
!(BIT_ULL(btree) & btree_interior_mask)) !(BIT_ULL(btree) & btree_interior_mask))
continue; continue;
bch2_trans_begin(trans); ret = __for_each_btree_node(trans, iter, btree,
__for_each_btree_node(trans, iter, btree,
btree == start.btree ? start.pos : POS_MIN, btree == start.btree ? start.pos : POS_MIN,
0, depth, BTREE_ITER_prefetch, b, ret) { 0, depth, BTREE_ITER_prefetch, b, ({
mem_may_pin -= btree_buf_bytes(b); mem_may_pin -= btree_buf_bytes(b);
if (mem_may_pin <= 0) { if (mem_may_pin <= 0) {
c->btree_cache.pinned_nodes_end = *end = c->btree_cache.pinned_nodes_end = *end =
BBPOS(btree, b->key.k.p); BBPOS(btree, b->key.k.p);
bch2_trans_iter_exit(trans, &iter); break;
return 0;
} }
} 0;
bch2_trans_iter_exit(trans, &iter); }));
} }
return ret; return ret;

View File

@ -1900,6 +1900,7 @@ err:
goto out; goto out;
} }
/* Only kept for -tools */
struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter) struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
{ {
struct btree *b; struct btree *b;

View File

@ -600,23 +600,35 @@ void bch2_trans_srcu_unlock(struct btree_trans *);
u32 bch2_trans_begin(struct btree_trans *); u32 bch2_trans_begin(struct btree_trans *);
/* #define __for_each_btree_node(_trans, _iter, _btree_id, _start, \
* XXX _locks_want, _depth, _flags, _b, _do) \
* this does not handle transaction restarts from bch2_btree_iter_next_node() ({ \
* correctly bch2_trans_begin((_trans)); \
*/ \
#define __for_each_btree_node(_trans, _iter, _btree_id, _start, \ struct btree_iter _iter; \
_locks_want, _depth, _flags, _b, _ret) \ bch2_trans_node_iter_init((_trans), &_iter, (_btree_id), \
for (bch2_trans_node_iter_init((_trans), &(_iter), (_btree_id), \ _start, _locks_want, _depth, _flags); \
_start, _locks_want, _depth, _flags); \ int _ret3 = 0; \
(_b) = bch2_btree_iter_peek_node_and_restart(&(_iter)), \ do { \
!((_ret) = PTR_ERR_OR_ZERO(_b)) && (_b); \ _ret3 = lockrestart_do((_trans), ({ \
(_b) = bch2_btree_iter_next_node(&(_iter))) struct btree *_b = bch2_btree_iter_peek_node(&_iter); \
if (!_b) \
break; \
\
PTR_ERR_OR_ZERO(_b) ?: (_do); \
})) ?: \
lockrestart_do((_trans), \
PTR_ERR_OR_ZERO(bch2_btree_iter_next_node(&_iter))); \
} while (!_ret3); \
\
bch2_trans_iter_exit((_trans), &(_iter)); \
_ret3; \
})
#define for_each_btree_node(_trans, _iter, _btree_id, _start, \ #define for_each_btree_node(_trans, _iter, _btree_id, _start, \
_flags, _b, _ret) \ _flags, _b, _do) \
__for_each_btree_node(_trans, _iter, _btree_id, _start, \ __for_each_btree_node(_trans, _iter, _btree_id, _start, \
0, 0, _flags, _b, _ret) 0, 0, _flags, _b, _do)
static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter, static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
unsigned flags) unsigned flags)

View File

@ -397,47 +397,27 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
size_t size, loff_t *ppos) size_t size, loff_t *ppos)
{ {
struct dump_iter *i = file->private_data; struct dump_iter *i = file->private_data;
struct btree_trans *trans;
struct btree_iter iter;
struct btree *b;
ssize_t ret;
i->ubuf = buf; i->ubuf = buf;
i->size = size; i->size = size;
i->ret = 0; i->ret = 0;
ret = flush_buf(i); ssize_t ret = flush_buf(i);
if (ret) if (ret)
return ret; return ret;
if (bpos_eq(SPOS_MAX, i->from)) if (bpos_eq(SPOS_MAX, i->from))
return i->ret; return i->ret;
trans = bch2_trans_get(i->c); return bch2_trans_run(i->c,
retry: for_each_btree_node(trans, iter, i->id, i->from, 0, b, ({
bch2_trans_begin(trans); bch2_btree_node_to_text(&i->buf, i->c, b);
i->from = !bpos_eq(SPOS_MAX, b->key.k.p)
? bpos_successor(b->key.k.p)
: b->key.k.p;
for_each_btree_node(trans, iter, i->id, i->from, 0, b, ret) { drop_locks_do(trans, flush_buf(i));
bch2_btree_node_to_text(&i->buf, i->c, b); }))) ?: i->ret;
i->from = !bpos_eq(SPOS_MAX, b->key.k.p)
? bpos_successor(b->key.k.p)
: b->key.k.p;
ret = drop_locks_do(trans, flush_buf(i));
if (ret)
break;
}
bch2_trans_iter_exit(trans, &iter);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
bch2_trans_put(trans);
if (!ret)
ret = flush_buf(i);
return ret ?: i->ret;
} }
static const struct file_operations btree_format_debug_ops = { static const struct file_operations btree_format_debug_ops = {