bcachefs: Tighten up btree_iter locking assertions
We weren't correctly verifying that we had interior node intent locks -
this patch also fixes bugs uncovered by the new assertions.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
parent 5468f1195d
commit 5aab663534
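The invariant the new assertions enforce: every btree level below iter->locks_want must be held with an intent lock, not merely a read lock. The following is a minimal, self-contained C sketch of that style of check — a toy model only, not the kernel code; struct iter_model, verify_locks(), and the lock_type enum are illustrative assumptions:

/*
 * Toy model of the tightened assertion: every level below locks_want
 * must hold an intent lock. Not bcachefs code; names are illustrative.
 */
#include <assert.h>
#include <stdio.h>

#define MAX_DEPTH 4

enum lock_type { LOCK_NONE, LOCK_READ, LOCK_INTENT };

struct iter_model {
	unsigned	locks_want;		/* intent locks wanted on levels < this */
	enum lock_type	held[MAX_DEPTH];	/* lock actually held per level */
};

static void verify_locks(const struct iter_model *iter)
{
	for (unsigned l = 0; l < MAX_DEPTH; l++)
		if (l < iter->locks_want)
			assert(iter->held[l] == LOCK_INTENT);
}

int main(void)
{
	struct iter_model iter = {
		.locks_want	= 2,
		.held		= { LOCK_INTENT, LOCK_INTENT, LOCK_READ, LOCK_NONE },
	};

	verify_locks(&iter);	/* passes: levels 0 and 1 hold intent locks */

	iter.held[1] = LOCK_READ;
	/* verify_locks(&iter); would now trip the assertion */

	printf("invariant holds\n");
	return 0;
}

For background: bcachefs btree node locks (six locks) have read, intent, and write states; intent is taken on interior nodes a traversal may modify, so the eventual upgrade to a write lock cannot deadlock against another intending writer while still permitting concurrent readers.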
fs/bcachefs/btree_cache.c

@@ -698,7 +698,9 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	 * currently fails for iterators that aren't pointed at a valid btree
 	 * node
 	 */
-	if (iter && !bch2_trans_relock(iter->trans))
+	if (iter &&
+	    (!bch2_trans_relock(iter->trans) ||
+	     !bch2_btree_iter_relock(iter, _THIS_IP_)))
 		return ERR_PTR(-EINTR);
 
 	if (!six_relock_type(&b->c.lock, lock_type, seq))
@@ -858,7 +860,9 @@ lock_node:
 	 * currently fails for iterators that aren't pointed at a valid
 	 * btree node
 	 */
-	if (iter && !bch2_trans_relock(iter->trans))
+	if (iter &&
+	    (!bch2_trans_relock(iter->trans) ||
+	     !bch2_btree_iter_relock(iter, _THIS_IP_)))
 		return ERR_PTR(-EINTR);
 
 	if (!six_relock_type(&b->c.lock, lock_type, seq))
fs/bcachefs/btree_iter.c

@@ -361,7 +361,7 @@ static void bch2_btree_iter_verify_locks(struct btree_iter *iter)
 		return;
 	}
 
-	for (l = 0; is_btree_node(iter, l); l++) {
+	for (l = 0; btree_iter_node(iter, l); l++) {
 		if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
 		    !btree_node_locked(iter, l))
 			continue;
@@ -383,7 +383,7 @@ static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
 #endif
 
 __flatten
-static bool bch2_btree_iter_relock(struct btree_iter *iter, unsigned long trace_ip)
+bool bch2_btree_iter_relock(struct btree_iter *iter, unsigned long trace_ip)
 {
 	return btree_iter_get_locks(iter, false, trace_ip);
 }
@@ -607,6 +607,8 @@ err:
 
 static void bch2_btree_iter_verify(struct btree_iter *iter)
 {
+	struct btree_trans *trans = iter->trans;
+	struct bch_fs *c = trans->c;
 	enum btree_iter_type type = btree_iter_type(iter);
 	unsigned i;
 
@@ -625,10 +627,16 @@ static void bch2_btree_iter_verify(struct btree_iter *iter)
 	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
 	       !btree_type_has_snapshots(iter->btree_id));
 
-	bch2_btree_iter_verify_locks(iter);
+	for (i = 0; i < (type != BTREE_ITER_CACHED ? BTREE_MAX_DEPTH : 1); i++) {
+		if (!iter->l[i].b) {
+			BUG_ON(c->btree_roots[iter->btree_id].b->c.level > i);
+			break;
+		}
 
-	for (i = 0; i < BTREE_MAX_DEPTH; i++)
 		bch2_btree_iter_verify_level(iter, i);
+	}
+
+	bch2_btree_iter_verify_locks(iter);
 }
 
 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
@@ -1350,30 +1358,30 @@ static inline unsigned btree_iter_up_until_good_node(struct btree_iter *iter,
 static int btree_iter_traverse_one(struct btree_iter *iter,
 				   unsigned long trace_ip)
 {
-	unsigned depth_want = iter->level;
+	unsigned l, depth_want = iter->level;
 	int ret = 0;
 
-	/*
-	 * if we need interior nodes locked, call btree_iter_relock() to make
-	 * sure we walk back up enough that we lock them:
-	 */
-	if (iter->uptodate == BTREE_ITER_NEED_RELOCK ||
-	    iter->locks_want > 1)
-		bch2_btree_iter_relock(iter, _THIS_IP_);
-
 	if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
 		ret = bch2_btree_iter_traverse_cached(iter);
 		goto out;
 	}
 
 	if (iter->uptodate < BTREE_ITER_NEED_RELOCK)
 		goto out;
 
 	if (unlikely(iter->level >= BTREE_MAX_DEPTH))
 		goto out;
 
 	iter->level = btree_iter_up_until_good_node(iter, 0);
 
+	/* If we need intent locks, take them too: */
+	for (l = iter->level + 1;
+	     l < iter->locks_want && btree_iter_node(iter, l);
+	     l++)
+		if (!bch2_btree_node_relock(iter, l))
+			while (iter->level <= l) {
+				btree_node_unlock(iter, iter->level);
+				iter->l[iter->level].b = BTREE_ITER_NO_NODE_UP;
+				iter->level++;
+			}
+
 	/*
 	 * Note: iter->nodes[iter->level] may be temporarily NULL here - that
 	 * would indicate to other code that we got to the end of the btree,
@@ -1394,6 +1402,7 @@ static int btree_iter_traverse_one(struct btree_iter *iter,
 		goto out;
 	}
 
+	__bch2_btree_iter_unlock(iter);
 	iter->level = depth_want;
 
 	if (ret == -EIO) {
fs/bcachefs/btree_iter.h

@@ -111,6 +111,8 @@ void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *,
 			      struct btree_node_iter *, struct bkey_packed *,
 			      unsigned, unsigned);
 
+bool bch2_btree_iter_relock(struct btree_iter *, unsigned long);
+
 bool bch2_trans_relock(struct btree_trans *);
 void bch2_trans_unlock(struct btree_trans *);
fs/bcachefs/btree_key_cache.c

@@ -271,7 +271,9 @@ int bch2_btree_iter_traverse_cached(struct btree_iter *iter)
 
 	BUG_ON(iter->level);
 
-	if (btree_node_locked(iter, 0)) {
+	iter->l[1].b = NULL;
+
+	if (bch2_btree_node_relock(iter, 0)) {
 		ck = (void *) iter->l[0].b;
 		goto fill;
 	}