btrfs: use a read/write lock for protecting the block groups tree

Currently we use a spin lock to protect the red-black tree that we use to
track block groups. Most accesses to that tree are read only, and for
large filesystems, with thousands of block groups, this has a bad impact
on performance, as concurrent read-only searches on the tree are
serialized.

Read only searches on the tree are very frequent and are done when (a
simplified lookup sketch follows this list):

1) Pinning and unpinning extents, as we need to lookup the respective
   block group from the tree;

2) Freeing the last reference of a tree block, regardless of whether we
   pin the underlying extent or add it back to the free space cache/tree;

3) During NOCOW writes, both buffered IO and direct IO, we need to check
   if the block group that contains an extent is read only or not and to
   increment the number of NOCOW writers in the block group. For those
   operations we need to search for the block group in the tree.
   Similarly, after creating the ordered extent for the NOCOW write, we
   need to decrement the number of NOCOW writers from the same block
   group, which requires searching for it in the tree;

4) Decreasing the number of extent reservations in a block group;

5) When allocating extents and freeing reserved extents;

6) Adding and removing free space to the free space tree;

7) When releasing delalloc bytes during ordered extent completion;

8) When relocating a block group;

9) During fitrim, to iterate over the block groups;

10) etc;
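
All the cases above boil down to a search in the red-black tree that only
needs the read side of the lock. A simplified sketch of such a lookup,
modelled on block_group_cache_tree_search() in the diff below (the struct
and the names are illustrative and reduced to what the tree walk needs,
they are not the exact btrfs code):

#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct bg_example {
	u64 start;
	u64 length;
	struct rb_node cache_node;
};

static struct rb_root_cached bg_tree = RB_ROOT_CACHED;
static DEFINE_RWLOCK(bg_tree_lock);

/* Find the block group containing @bytenr; readers can run concurrently. */
static struct bg_example *bg_lookup(u64 bytenr)
{
	struct bg_example *found = NULL;
	struct rb_node *n;

	read_lock(&bg_tree_lock);
	n = bg_tree.rb_root.rb_node;
	while (n) {
		struct bg_example *bg = rb_entry(n, struct bg_example, cache_node);

		if (bytenr < bg->start) {
			n = n->rb_left;
		} else if (bytenr >= bg->start + bg->length) {
			n = n->rb_right;
		} else {
			found = bg;
			break;
		}
	}
	read_unlock(&bg_tree_lock);
	return found;
}

Any number of such lookups can now run in parallel; they only block while
a block group is being inserted into or erased from the tree.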

Write accesses to the tree, to add or remove block groups, are much less
frequent as they happen only when allocating a new block group or when
deleting a block group.

We also use the same spin lock to protect the list of currently caching
block groups. Additions to this list are made when we need to cache a
block group, because we don't have a free space cache for it (or we have
one but it's invalid), and removals from this list are done when caching
of the block group's free space finishes. These cases are also not very
common, and when they do happen, they typically happen only once, shortly
after the filesystem is mounted.

So switch the lock that protects the tree of block groups from a spinning
lock to a read/write lock.
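
The write side, which excludes both readers and other writers, is then
only taken in the rare paths that add or remove block groups. A sketch of
the corresponding insert, reusing the illustrative bg_example, bg_tree and
bg_tree_lock from the lookup sketch above (again, not the real btrfs
helpers):

#include <linux/errno.h>	/* for -EEXIST */

/* Insert a new block group; the writer excludes all readers and writers. */
static int bg_insert(struct bg_example *block_group)
{
	struct rb_node **p, *parent = NULL;
	bool leftmost = true;

	write_lock(&bg_tree_lock);
	p = &bg_tree.rb_root.rb_node;
	while (*p) {
		struct bg_example *cur = rb_entry(*p, struct bg_example, cache_node);

		parent = *p;
		if (block_group->start < cur->start) {
			p = &(*p)->rb_left;
		} else if (block_group->start > cur->start) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			write_unlock(&bg_tree_lock);
			return -EEXIST;
		}
	}
	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color_cached(&block_group->cache_node, &bg_tree, leftmost);
	write_unlock(&bg_tree_lock);
	return 0;
}

The conversion below does not change any critical section, only the lock
type: spinlock_t becomes rwlock_t, spin_lock_init() becomes rwlock_init(),
and each spin_lock()/spin_unlock() pair becomes read_lock()/read_unlock()
or write_lock()/write_unlock() depending on whether the section only
searches the tree or modifies the tree or the caching list.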

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>

Commit: 16b0c2581e (parent: 08dddb2951)
Author: Filipe Manana, 2022-04-13 16:20:41 +01:00
Committed by: David Sterba
5 changed files with 26 additions and 26 deletions

fs/btrfs/block-group.c

@@ -172,7 +172,7 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
 	ASSERT(block_group->length != 0);
-	spin_lock(&info->block_group_cache_lock);
+	write_lock(&info->block_group_cache_lock);
 	p = &info->block_group_cache_tree.rb_root.rb_node;
 	while (*p) {
@@ -184,7 +184,7 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
 			p = &(*p)->rb_right;
 			leftmost = false;
 		} else {
-			spin_unlock(&info->block_group_cache_lock);
+			write_unlock(&info->block_group_cache_lock);
 			return -EEXIST;
 		}
 	}
@@ -193,7 +193,7 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
 	rb_insert_color_cached(&block_group->cache_node,
 			       &info->block_group_cache_tree, leftmost);
-	spin_unlock(&info->block_group_cache_lock);
+	write_unlock(&info->block_group_cache_lock);
 	return 0;
 }
@@ -209,7 +209,7 @@ static struct btrfs_block_group *block_group_cache_tree_search(
 	struct rb_node *n;
 	u64 end, start;
-	spin_lock(&info->block_group_cache_lock);
+	read_lock(&info->block_group_cache_lock);
 	n = info->block_group_cache_tree.rb_root.rb_node;
 	while (n) {
@@ -234,7 +234,7 @@ static struct btrfs_block_group *block_group_cache_tree_search(
 	}
 	if (ret)
 		btrfs_get_block_group(ret);
-	spin_unlock(&info->block_group_cache_lock);
+	read_unlock(&info->block_group_cache_lock);
 	return ret;
 }
@@ -263,13 +263,13 @@ struct btrfs_block_group *btrfs_next_block_group(
 	struct btrfs_fs_info *fs_info = cache->fs_info;
 	struct rb_node *node;
-	spin_lock(&fs_info->block_group_cache_lock);
+	read_lock(&fs_info->block_group_cache_lock);
 	/* If our block group was removed, we need a full search. */
 	if (RB_EMPTY_NODE(&cache->cache_node)) {
 		const u64 next_bytenr = cache->start + cache->length;
-		spin_unlock(&fs_info->block_group_cache_lock);
+		read_unlock(&fs_info->block_group_cache_lock);
 		btrfs_put_block_group(cache);
 		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr); return cache;
 	}
@@ -280,7 +280,7 @@ struct btrfs_block_group *btrfs_next_block_group(
 		btrfs_get_block_group(cache);
 	} else
 		cache = NULL;
-	spin_unlock(&fs_info->block_group_cache_lock);
+	read_unlock(&fs_info->block_group_cache_lock);
 	return cache;
 }
@@ -768,10 +768,10 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only
 	cache->has_caching_ctl = 1;
 	spin_unlock(&cache->lock);
-	spin_lock(&fs_info->block_group_cache_lock);
+	write_lock(&fs_info->block_group_cache_lock);
 	refcount_inc(&caching_ctl->count);
 	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
-	spin_unlock(&fs_info->block_group_cache_lock);
+	write_unlock(&fs_info->block_group_cache_lock);
 	btrfs_get_block_group(cache);
@@ -953,7 +953,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	if (ret)
 		goto out;
-	spin_lock(&fs_info->block_group_cache_lock);
+	write_lock(&fs_info->block_group_cache_lock);
 	rb_erase_cached(&block_group->cache_node,
 			&fs_info->block_group_cache_tree);
 	RB_CLEAR_NODE(&block_group->cache_node);
@@ -961,7 +961,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	/* Once for the block groups rbtree */
 	btrfs_put_block_group(block_group);
-	spin_unlock(&fs_info->block_group_cache_lock);
+	write_unlock(&fs_info->block_group_cache_lock);
 	down_write(&block_group->space_info->groups_sem);
 	/*
@@ -986,7 +986,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	if (block_group->cached == BTRFS_CACHE_STARTED)
 		btrfs_wait_block_group_cache_done(block_group);
 	if (block_group->has_caching_ctl) {
-		spin_lock(&fs_info->block_group_cache_lock);
+		write_lock(&fs_info->block_group_cache_lock);
 		if (!caching_ctl) {
 			struct btrfs_caching_control *ctl;
@@ -1000,7 +1000,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 		}
 		if (caching_ctl)
 			list_del_init(&caching_ctl->list);
-		spin_unlock(&fs_info->block_group_cache_lock);
+		write_unlock(&fs_info->block_group_cache_lock);
 		if (caching_ctl) {
 			/* Once for the caching bgs list and once for us. */
 			btrfs_put_caching_control(caching_ctl);
@@ -3970,14 +3970,14 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 	struct btrfs_caching_control *caching_ctl;
 	struct rb_node *n;
-	spin_lock(&info->block_group_cache_lock);
+	write_lock(&info->block_group_cache_lock);
 	while (!list_empty(&info->caching_block_groups)) {
 		caching_ctl = list_entry(info->caching_block_groups.next,
 					 struct btrfs_caching_control, list);
 		list_del(&caching_ctl->list);
 		btrfs_put_caching_control(caching_ctl);
 	}
-	spin_unlock(&info->block_group_cache_lock);
+	write_unlock(&info->block_group_cache_lock);
 	spin_lock(&info->unused_bgs_lock);
 	while (!list_empty(&info->unused_bgs)) {
@@ -4007,14 +4007,14 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 	}
 	spin_unlock(&info->zone_active_bgs_lock);
-	spin_lock(&info->block_group_cache_lock);
+	write_lock(&info->block_group_cache_lock);
 	while ((n = rb_last(&info->block_group_cache_tree.rb_root)) != NULL) {
 		block_group = rb_entry(n, struct btrfs_block_group,
 				       cache_node);
 		rb_erase_cached(&block_group->cache_node,
 				&info->block_group_cache_tree);
 		RB_CLEAR_NODE(&block_group->cache_node);
-		spin_unlock(&info->block_group_cache_lock);
+		write_unlock(&info->block_group_cache_lock);
 		down_write(&block_group->space_info->groups_sem);
 		list_del(&block_group->list);
@@ -4037,9 +4037,9 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 		ASSERT(block_group->swap_extents == 0);
 		btrfs_put_block_group(block_group);
-		spin_lock(&info->block_group_cache_lock);
+		write_lock(&info->block_group_cache_lock);
 	}
-	spin_unlock(&info->block_group_cache_lock);
+	write_unlock(&info->block_group_cache_lock);
 	btrfs_release_global_block_rsv(info);

fs/btrfs/ctree.h

@@ -679,7 +679,7 @@ struct btrfs_fs_info {
 	struct radix_tree_root fs_roots_radix;
 	/* block group cache stuff */
-	spinlock_t block_group_cache_lock;
+	rwlock_t block_group_cache_lock;
 	struct rb_root_cached block_group_cache_tree;
 	/* keep track of unallocated space */

fs/btrfs/disk-io.c

@@ -3231,7 +3231,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
 	btrfs_init_balance(fs_info);
 	btrfs_init_async_reclaim_work(fs_info);
-	spin_lock_init(&fs_info->block_group_cache_lock);
+	rwlock_init(&fs_info->block_group_cache_lock);
 	fs_info->block_group_cache_tree = RB_ROOT_CACHED;
 	extent_io_tree_init(fs_info, &fs_info->excluded_extents,

fs/btrfs/extent-tree.c

@@ -2497,7 +2497,7 @@ static u64 first_logical_byte(struct btrfs_fs_info *fs_info)
 	struct rb_node *leftmost;
 	u64 bytenr = 0;
-	spin_lock(&fs_info->block_group_cache_lock);
+	read_lock(&fs_info->block_group_cache_lock);
 	/* Get the block group with the lowest logical start address. */
 	leftmost = rb_first_cached(&fs_info->block_group_cache_tree);
 	if (leftmost) {
@@ -2506,7 +2506,7 @@ static u64 first_logical_byte(struct btrfs_fs_info *fs_info)
 		bg = rb_entry(leftmost, struct btrfs_block_group, cache_node);
 		bytenr = bg->start;
 	}
-	spin_unlock(&fs_info->block_group_cache_lock);
+	read_unlock(&fs_info->block_group_cache_lock);
 	return bytenr;
 }

fs/btrfs/transaction.c

@@ -221,7 +221,7 @@ static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
 	 * the caching thread will re-start it's search from 3, and thus find
 	 * the hole from [4,6) to add to the free space cache.
 	 */
-	spin_lock(&fs_info->block_group_cache_lock);
+	write_lock(&fs_info->block_group_cache_lock);
 	list_for_each_entry_safe(caching_ctl, next,
 				 &fs_info->caching_block_groups, list) {
 		struct btrfs_block_group *cache = caching_ctl->block_group;
@@ -234,7 +234,7 @@ static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
 			cache->last_byte_to_unpin = caching_ctl->progress;
 		}
 	}
-	spin_unlock(&fs_info->block_group_cache_lock);
+	write_unlock(&fs_info->block_group_cache_lock);
 	up_write(&fs_info->commit_root_sem);
 }