mirror of https://github.com/torvalds/linux.git
btrfs: factor out prepare_allocation() for extent allocation
This finally factors out prepare_allocation() from find_free_extent(). This function is called before the allocation loop; an allocator-specific function such as prepare_allocation_clustered() should initialize its private information there and can set a proper hint_byte to indicate where to start the allocation.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
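The shape of the new hook is easiest to see in isolation. Below is a minimal user-space sketch, not the kernel code itself: the names mirror the kernel's, but the types are simplified stand-ins for the btrfs structures, and the space_info locking and free-space-cluster fetching that the real prepare_allocation_clustered() does are omitted. It shows the pattern the commit introduces: a per-policy prepare step that runs once before the allocation search loop, can adjust the search strategy, and can fail early with -ENOSPC when free space is too fragmented.

/*
 * Simplified model of the prepare_allocation() dispatch pattern.
 * All types here are hypothetical stand-ins, not the btrfs structs.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum extent_alloc_policy { EXTENT_ALLOC_CLUSTERED };

struct find_free_extent_ctl {
	enum extent_alloc_policy policy;
	unsigned long long num_bytes;   /* size we want to allocate */
	bool use_cluster;               /* try clustered allocation? */
};

struct space_info {
	unsigned long long max_extent_size; /* largest free extent seen */
};

static int prepare_allocation_clustered(struct find_free_extent_ctl *ffe_ctl,
					struct space_info *space_info)
{
	/* Heavily fragmented: refuse early rather than search in vain. */
	if (space_info->max_extent_size &&
	    ffe_ctl->num_bytes > space_info->max_extent_size)
		return -ENOSPC;
	/* Some fragmentation: skip clustering but keep searching. */
	if (space_info->max_extent_size)
		ffe_ctl->use_cluster = false;
	return 0;
}

static int prepare_allocation(struct find_free_extent_ctl *ffe_ctl,
			      struct space_info *space_info)
{
	/* Dispatch on the allocation policy, as the kernel code does. */
	switch (ffe_ctl->policy) {
	case EXTENT_ALLOC_CLUSTERED:
		return prepare_allocation_clustered(ffe_ctl, space_info);
	}
	return -EINVAL; /* unknown policy; the kernel BUG()s here */
}

int main(void)
{
	struct find_free_extent_ctl ffe_ctl = {
		.policy = EXTENT_ALLOC_CLUSTERED,
		.num_bytes = 1 << 20,   /* want 1 MiB */
		.use_cluster = true,
	};
	struct space_info si = { .max_extent_size = 1 << 16 }; /* 64 KiB */

	/* Runs once before the search loop; here it fails early. */
	int ret = prepare_allocation(&ffe_ctl, &si);
	printf("prepare_allocation: %d (use_cluster=%d)\n",
	       ret, ffe_ctl.use_cluster);
	return 0;
}

The design point is the same one the commit makes: once the pre-loop setup is behind a single policy switch, a future allocation policy only needs its own prepare function, with no changes to the body of find_free_extent().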
parent 45d8e033b2
commit 7e89540942
@@ -3853,6 +3853,71 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
 	return -ENOSPC;
 }
 
+static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
+					struct find_free_extent_ctl *ffe_ctl,
+					struct btrfs_space_info *space_info,
+					struct btrfs_key *ins)
+{
+	/*
+	 * If our free space is heavily fragmented we may not be able to make
+	 * big contiguous allocations, so instead of doing the expensive search
+	 * for free space, simply return ENOSPC with our max_extent_size so we
+	 * can go ahead and search for a more manageable chunk.
+	 *
+	 * If our max_extent_size is large enough for our allocation simply
+	 * disable clustering since we will likely not be able to find enough
+	 * space to create a cluster and induce latency trying.
+	 */
+	if (space_info->max_extent_size) {
+		spin_lock(&space_info->lock);
+		if (space_info->max_extent_size &&
+		    ffe_ctl->num_bytes > space_info->max_extent_size) {
+			ins->offset = space_info->max_extent_size;
+			spin_unlock(&space_info->lock);
+			return -ENOSPC;
+		} else if (space_info->max_extent_size) {
+			ffe_ctl->use_cluster = false;
+		}
+		spin_unlock(&space_info->lock);
+	}
+
+	ffe_ctl->last_ptr = fetch_cluster_info(fs_info, space_info,
+					       &ffe_ctl->empty_cluster);
+	if (ffe_ctl->last_ptr) {
+		struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
+
+		spin_lock(&last_ptr->lock);
+		if (last_ptr->block_group)
+			ffe_ctl->hint_byte = last_ptr->window_start;
+		if (last_ptr->fragmented) {
+			/*
+			 * We still set window_start so we can keep track of the
+			 * last place we found an allocation to try and save
+			 * some time.
+			 */
+			ffe_ctl->hint_byte = last_ptr->window_start;
+			ffe_ctl->use_cluster = false;
+		}
+		spin_unlock(&last_ptr->lock);
+	}
+
+	return 0;
+}
+
+static int prepare_allocation(struct btrfs_fs_info *fs_info,
+			      struct find_free_extent_ctl *ffe_ctl,
+			      struct btrfs_space_info *space_info,
+			      struct btrfs_key *ins)
+{
+	switch (ffe_ctl->policy) {
+	case BTRFS_EXTENT_ALLOC_CLUSTERED:
+		return prepare_allocation_clustered(fs_info, ffe_ctl,
+						    space_info, ins);
+	default:
+		BUG();
+	}
+}
+
 /*
  * walks the btree of allocated extents and find a hole of a given size.
  * The key ins is changed to record the hole:
@@ -3922,48 +3987,9 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
 		return -ENOSPC;
 	}
 
-	/*
-	 * If our free space is heavily fragmented we may not be able to make
-	 * big contiguous allocations, so instead of doing the expensive search
-	 * for free space, simply return ENOSPC with our max_extent_size so we
-	 * can go ahead and search for a more manageable chunk.
-	 *
-	 * If our max_extent_size is large enough for our allocation simply
-	 * disable clustering since we will likely not be able to find enough
-	 * space to create a cluster and induce latency trying.
-	 */
-	if (unlikely(space_info->max_extent_size)) {
-		spin_lock(&space_info->lock);
-		if (space_info->max_extent_size &&
-		    num_bytes > space_info->max_extent_size) {
-			ins->offset = space_info->max_extent_size;
-			spin_unlock(&space_info->lock);
-			return -ENOSPC;
-		} else if (space_info->max_extent_size) {
-			ffe_ctl.use_cluster = false;
-		}
-		spin_unlock(&space_info->lock);
-	}
-
-	ffe_ctl.last_ptr = fetch_cluster_info(fs_info, space_info,
-					      &ffe_ctl.empty_cluster);
-	if (ffe_ctl.last_ptr) {
-		struct btrfs_free_cluster *last_ptr = ffe_ctl.last_ptr;
-
-		spin_lock(&last_ptr->lock);
-		if (last_ptr->block_group)
-			ffe_ctl.hint_byte = last_ptr->window_start;
-		if (last_ptr->fragmented) {
-			/*
-			 * We still set window_start so we can keep track of the
-			 * last place we found an allocation to try and save
-			 * some time.
-			 */
-			ffe_ctl.hint_byte = last_ptr->window_start;
-			ffe_ctl.use_cluster = false;
-		}
-		spin_unlock(&last_ptr->lock);
-	}
+	ret = prepare_allocation(fs_info, &ffe_ctl, space_info, ins);
+	if (ret < 0)
+		return ret;
 
 	ffe_ctl.search_start = max(ffe_ctl.search_start,
 				   first_logical_byte(fs_info, 0));