btrfs: zoned: activate block group only for extent allocation
In btrfs_make_block_group(), we activate the allocated block group,
expecting that it will soon be used for extent allocation. However, chunk
allocation from the flush_space() context breaks that assumption: there can
be a large time gap between when a chunk is allocated and when an extent is
first allocated from it.
Activating the empty block groups pre-allocated from the flush_space()
context can exhaust a device's active zone counter. Once all the active
zone counts are consumed by empty pre-allocated block groups, we cannot
activate a new block group for anything else: metadata, tree-log, or data
relocation block groups. That failure results in a spurious -ENOSPC.
This patch introduces CHUNK_ALLOC_FORCE_FOR_EXTENT to distinguish chunk
allocations requested from find_free_extent(). The new block group is now
activated only in that context.
Fixes: eb66a010d5 ("btrfs: zoned: activate new block group")
CC: stable@vger.kernel.org # 5.16+
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Tested-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 820c363bd5
commit 760e69c4c2
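For orientation before the diff, here is a minimal, compilable sketch of the
control flow this change introduces in btrfs_chunk_alloc(). Only the
CHUNK_ALLOC_* values come from the patch; chunk_alloc(), zone_activate(), and
the block-group names are stand-ins invented for illustration, not the real
kernel helpers.

/*
 * Illustrative stand-in code, not the kernel implementation: models how the
 * new CHUNK_ALLOC_FORCE_FOR_EXTENT request is downgraded to CHUNK_ALLOC_FORCE
 * while remembering that an extent allocation is imminent.
 */
#include <stdbool.h>
#include <stdio.h>

enum chunk_alloc_enum {
        CHUNK_ALLOC_NO_FORCE,
        CHUNK_ALLOC_LIMITED,
        CHUNK_ALLOC_FORCE,
        CHUNK_ALLOC_FORCE_FOR_EXTENT,   /* new: only find_free_extent() passes this */
};

/* Stand-in for btrfs_zone_activate(): claims one active zone on the device. */
static void zone_activate(const char *bg)
{
        printf("activating zone for block group %s\n", bg);
}

static int chunk_alloc(enum chunk_alloc_enum force, const char *new_bg)
{
        bool from_extent_allocation = false;

        /* Remember the caller, then fall back to the plain FORCE behaviour. */
        if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) {
                from_extent_allocation = true;
                force = CHUNK_ALLOC_FORCE;
        }

        /* ... chunk/block-group allocation would happen here ... */

        /*
         * Only activate the zone when an extent allocation is imminent;
         * speculative allocations from flush_space() leave the zone inactive
         * so the device's active zone budget is not burned on empty groups.
         */
        if (from_extent_allocation)
                zone_activate(new_bg);

        return 0;
}

int main(void)
{
        chunk_alloc(CHUNK_ALLOC_FORCE, "flush_space bg");        /* not activated */
        chunk_alloc(CHUNK_ALLOC_FORCE_FOR_EXTENT, "ffe bg");     /* activated now */
        return 0;
}

The point of the sketch: only the find_free_extent() path asks for immediate
activation, so block groups pre-allocated speculatively no longer consume an
active zone up front.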
fs/btrfs/block-group.c

@@ -2503,12 +2503,6 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
 		return ERR_PTR(ret);
 	}
 
-	/*
-	 * New block group is likely to be used soon. Try to activate it now.
-	 * Failure is OK for now.
-	 */
-	btrfs_zone_activate(cache);
-
 	ret = exclude_super_stripes(cache);
 	if (ret) {
 		/* We may have excluded something, so call this just in case */
@@ -3660,8 +3654,14 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
 	struct btrfs_block_group *ret_bg;
 	bool wait_for_alloc = false;
 	bool should_alloc = false;
+	bool from_extent_allocation = false;
 	int ret = 0;
 
+	if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) {
+		from_extent_allocation = true;
+		force = CHUNK_ALLOC_FORCE;
+	}
+
 	/* Don't re-enter if we're already allocating a chunk */
 	if (trans->allocating_chunk)
 		return -ENOSPC;
@@ -3754,9 +3754,17 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
 	ret_bg = do_chunk_alloc(trans, flags);
 	trans->allocating_chunk = false;
 
-	if (IS_ERR(ret_bg))
+	if (IS_ERR(ret_bg)) {
 		ret = PTR_ERR(ret_bg);
-	else
+	} else if (from_extent_allocation) {
+		/*
+		 * New block group is likely to be used soon. Try to activate
+		 * it now. Failure is OK for now.
+		 */
+		btrfs_zone_activate(ret_bg);
+	}
+
+	if (!ret)
 		btrfs_put_block_group(ret_bg);
 
 	spin_lock(&space_info->lock);

fs/btrfs/block-group.h

@@ -35,11 +35,15 @@ enum btrfs_discard_state {
  * the FS with empty chunks
  *
  * CHUNK_ALLOC_FORCE means it must try to allocate one
+ *
+ * CHUNK_ALLOC_FORCE_FOR_EXTENT like CHUNK_ALLOC_FORCE but called from
+ * find_free_extent() that also activaes the zone
  */
 enum btrfs_chunk_alloc_enum {
 	CHUNK_ALLOC_NO_FORCE,
 	CHUNK_ALLOC_LIMITED,
 	CHUNK_ALLOC_FORCE,
+	CHUNK_ALLOC_FORCE_FOR_EXTENT,
 };
 
 struct btrfs_caching_control {

fs/btrfs/extent-tree.c

@@ -4082,7 +4082,7 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
 	}
 
 	ret = btrfs_chunk_alloc(trans, ffe_ctl->flags,
-				CHUNK_ALLOC_FORCE);
+				CHUNK_ALLOC_FORCE_FOR_EXTENT);
 
 	/* Do not bail out on ENOSPC since we can do more. */
 	if (ret == -ENOSPC)