bcachefs: Kill read lock dropping in bch2_btree_node_lock_write_nofail()
Dropping read locks in bch2_btree_node_lock_write_nofail() dates from before we had the cycle detector; we can now tell the cycle detector directly when taking a lock may not fail because we can't handle transaction restarts.

This is needed for adding should_be_locked asserts.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
commit 517236cb3e
parent beccf29114
@@ -440,33 +440,7 @@ void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
 				       struct btree_path *path,
 				       struct btree_bkey_cached_common *b)
 {
-	struct btree_path *linked;
-	unsigned i, iter;
-	int ret;
-
-	/*
-	 * XXX BIG FAT NOTICE
-	 *
-	 * Drop all read locks before taking a write lock:
-	 *
-	 * This is a hack, because bch2_btree_node_lock_write_nofail() is a
-	 * hack - but by dropping read locks first, this should never fail, and
-	 * we only use this in code paths where whatever read locks we've
-	 * already taken are no longer needed:
-	 */
-
-	trans_for_each_path(trans, linked, iter) {
-		if (!linked->nodes_locked)
-			continue;
-
-		for (i = 0; i < BTREE_MAX_DEPTH; i++)
-			if (btree_node_read_locked(linked, i)) {
-				btree_node_unlock(trans, linked, i);
-				btree_path_set_dirty(linked, BTREE_ITER_NEED_RELOCK);
-			}
-	}
-
-	ret = __btree_node_lock_write(trans, path, b, true);
+	int ret = __btree_node_lock_write(trans, path, b, true);
 	BUG_ON(ret);
 }
 
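After this commit the whole function reduces to a single call into the write-lock path. A minimal sketch of the resulting function, reconstructed from the hunk above; the reading of the final `true` argument as a "this lock may not fail" hint to the cycle detector follows the commit message, and is an interpretation rather than something shown in this hunk:

void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
				       struct btree_path *path,
				       struct btree_bkey_cached_common *b)
{
	/*
	 * The final `true` tells the locking/cycle-detector path that this
	 * acquisition may not fail, because the caller cannot handle a
	 * transaction restart; the old "drop all read locks first"
	 * workaround is therefore no longer needed.
	 */
	int ret = __btree_node_lock_write(trans, path, b, true);

	BUG_ON(ret);
}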