Btrfs: Kill allocate_wait in space_info

We already have fs_info->chunk_mutex to avoid concurrent
chunk creation.

Signed-off-by: Yan Zheng <zheng.yan@oracle.com>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Author:    Yan, Zheng <zheng.yan@oracle.com>
Date:      2010-05-16 10:46:25 -04:00
Committer: Chris Mason <chris.mason@oracle.com>
Parent:    b742bb82f1
Commit:    424499dbd0
Changed:   2 files, 58 additions, 76 deletions
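
The point of the change: the old maybe_allocate_chunk() built its own mutual
exclusion out of an allocating_chunk flag plus the allocate_wait waitqueue,
even though do_chunk_alloc() already takes fs_info->chunk_mutex, which by
itself guarantees that only one chunk allocation runs at a time. Below is a
minimal userspace sketch of that idea, with pthreads standing in for the
kernel primitives; the names (alloc_chunk, chunk_mutex, full) are
illustrative, not the btrfs API.

/* Many threads may decide a chunk is needed; the mutex alone serializes
 * them, and re-checking state under the lock prevents duplicate work. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;
static int full;			/* set once a chunk has been allocated */

static void alloc_chunk(void)
{
	pthread_mutex_lock(&chunk_mutex);	/* late arrivals block here */
	if (!full) {				/* re-check under the lock */
		full = 1;
		printf("allocated a chunk\n");	/* happens exactly once */
	}
	pthread_mutex_unlock(&chunk_mutex);
}

static void *worker(void *arg)
{
	alloc_chunk();
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}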

fs/btrfs/ctree.h

@@ -701,9 +701,7 @@ struct btrfs_space_info {
 	struct list_head list;
 
 	/* for controlling how we free up space for allocations */
-	wait_queue_head_t allocate_wait;
 	wait_queue_head_t flush_wait;
-	int allocating_chunk;
 	int flushing;
 
 	/* for block groups in our same type */

fs/btrfs/extent-tree.c

@@ -71,6 +71,9 @@ static int find_next_key(struct btrfs_path *path, int level,
 			 struct btrfs_key *key);
 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
 			    int dump_block_groups);
+static int maybe_allocate_chunk(struct btrfs_trans_handle *trans,
+				struct btrfs_root *root,
+				struct btrfs_space_info *sinfo, u64 num_bytes);
 
 static noinline int
 block_group_cache_done(struct btrfs_block_group_cache *cache)
@@ -2691,7 +2694,6 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 		INIT_LIST_HEAD(&found->block_groups[i]);
 	init_rwsem(&found->groups_sem);
 	init_waitqueue_head(&found->flush_wait);
-	init_waitqueue_head(&found->allocate_wait);
 	spin_lock_init(&found->lock);
 	found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
 				BTRFS_BLOCK_GROUP_SYSTEM |
@@ -3004,71 +3006,6 @@ flush:
 	wake_up(&info->flush_wait);
 }
 
-static int maybe_allocate_chunk(struct btrfs_root *root,
-				struct btrfs_space_info *info)
-{
-	struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
-	struct btrfs_trans_handle *trans;
-	bool wait = false;
-	int ret = 0;
-	u64 min_metadata;
-	u64 free_space;
-
-	free_space = btrfs_super_total_bytes(disk_super);
-	/*
-	 * we allow the metadata to grow to a max of either 10gb or 5% of the
-	 * space in the volume.
-	 */
-	min_metadata = min((u64)10 * 1024 * 1024 * 1024,
-			   div64_u64(free_space * 5, 100));
-	if (info->total_bytes >= min_metadata) {
-		spin_unlock(&info->lock);
-		return 0;
-	}
-
-	if (info->full) {
-		spin_unlock(&info->lock);
-		return 0;
-	}
-
-	if (!info->allocating_chunk) {
-		info->force_alloc = 1;
-		info->allocating_chunk = 1;
-	} else {
-		wait = true;
-	}
-
-	spin_unlock(&info->lock);
-
-	if (wait) {
-		wait_event(info->allocate_wait,
-			   !info->allocating_chunk);
-		return 1;
-	}
-
-	trans = btrfs_start_transaction(root, 1);
-	if (!trans) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	ret = do_chunk_alloc(trans, root->fs_info->extent_root,
-			     4096 + 2 * 1024 * 1024,
-			     info->flags, 0);
-	btrfs_end_transaction(trans, root);
-	if (ret)
-		goto out;
-out:
-	spin_lock(&info->lock);
-	info->allocating_chunk = 0;
-	spin_unlock(&info->lock);
-	wake_up(&info->allocate_wait);
-
-	if (ret)
-		return 0;
-	return 1;
-}
-
 /*
  * Reserve metadata space for delalloc.
  */
@@ -3109,7 +3046,8 @@ again:
 		flushed++;
 
 		if (flushed == 1) {
-			if (maybe_allocate_chunk(root, meta_sinfo))
+			if (maybe_allocate_chunk(NULL, root, meta_sinfo,
+						 num_bytes))
 				goto again;
 			flushed++;
 		} else {
@@ -3224,7 +3162,8 @@ again:
 	if (used > meta_sinfo->total_bytes) {
 		retries++;
 		if (retries == 1) {
-			if (maybe_allocate_chunk(root, meta_sinfo))
+			if (maybe_allocate_chunk(NULL, root, meta_sinfo,
+						 num_bytes))
 				goto again;
 			retries++;
 		} else {
@@ -3421,13 +3360,28 @@ static void force_metadata_allocation(struct btrfs_fs_info *info)
 	rcu_read_unlock();
 }
 
+static int should_alloc_chunk(struct btrfs_space_info *sinfo,
+			      u64 alloc_bytes)
+{
+	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
+
+	if (sinfo->bytes_used + sinfo->bytes_reserved +
+	    alloc_bytes + 256 * 1024 * 1024 < num_bytes)
+		return 0;
+
+	if (sinfo->bytes_used + sinfo->bytes_reserved +
+	    alloc_bytes < div_factor(num_bytes, 8))
+		return 0;
+
+	return 1;
+}
+
 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 			  struct btrfs_root *extent_root, u64 alloc_bytes,
 			  u64 flags, int force)
 {
 	struct btrfs_space_info *space_info;
 	struct btrfs_fs_info *fs_info = extent_root->fs_info;
-	u64 thresh;
 	int ret = 0;
 
 	mutex_lock(&fs_info->chunk_mutex);
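
For intuition, here is the new should_alloc_chunk() heuristic restated with
concrete numbers (a standalone sketch, not kernel code; in btrfs,
div_factor(n, 8) computes n * 8 / 10, i.e. 80%): a chunk is allocated only
when the space is both within 256 MiB of full and at least 80% consumed.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the heuristic above: return 1 only if the space is within
 * 256 MiB of full AND usage has crossed 80% of the writable bytes. */
static int should_alloc_chunk(uint64_t total, uint64_t readonly,
			      uint64_t used, uint64_t reserved,
			      uint64_t alloc_bytes)
{
	uint64_t num_bytes = total - readonly;

	if (used + reserved + alloc_bytes + 256ULL * 1024 * 1024 < num_bytes)
		return 0;	/* more than 256 MiB of slack remains */
	if (used + reserved + alloc_bytes < num_bytes * 8 / 10)
		return 0;	/* under 80% consumed */
	return 1;
}

int main(void)
{
	uint64_t MiB = 1024ULL * 1024;

	/* 1 GiB space, 900 MiB used, 2 MiB requested:
	 * slack ~122 MiB (< 256 MiB), usage ~88% (>= 80%) -> prints 1 */
	printf("%d\n", should_alloc_chunk(1024 * MiB, 0, 900 * MiB,
					  0, 2 * MiB));
	/* Same space at 700 MiB used: slack ~322 MiB (> 256 MiB) -> prints 0 */
	printf("%d\n", should_alloc_chunk(1024 * MiB, 0, 700 * MiB,
					  0, 2 * MiB));
	return 0;
}
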
@@ -3450,11 +3404,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 		goto out;
 	}
 
-	thresh = space_info->total_bytes - space_info->bytes_readonly;
-	thresh = div_factor(thresh, 8);
-	if (!force &&
-	    (space_info->bytes_used + space_info->bytes_pinned +
-	     space_info->bytes_reserved + alloc_bytes) < thresh) {
+	if (!force && !should_alloc_chunk(space_info, alloc_bytes)) {
 		spin_unlock(&space_info->lock);
 		goto out;
 	}
@@ -3476,6 +3426,8 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	spin_lock(&space_info->lock);
 	if (ret)
 		space_info->full = 1;
+	else
+		ret = 1;
 	space_info->force_alloc = 0;
 	spin_unlock(&space_info->lock);
 out:
@@ -3483,6 +3435,38 @@ out:
 	return ret;
 }
 
+static int maybe_allocate_chunk(struct btrfs_trans_handle *trans,
+				struct btrfs_root *root,
+				struct btrfs_space_info *sinfo, u64 num_bytes)
+{
+	int ret;
+	int end_trans = 0;
+
+	if (sinfo->full)
+		return 0;
+
+	spin_lock(&sinfo->lock);
+	ret = should_alloc_chunk(sinfo, num_bytes + 2 * 1024 * 1024);
+	spin_unlock(&sinfo->lock);
+	if (!ret)
+		return 0;
+
+	if (!trans) {
+		trans = btrfs_join_transaction(root, 1);
+		BUG_ON(IS_ERR(trans));
+		end_trans = 1;
+	}
+
+	ret = do_chunk_alloc(trans, root->fs_info->extent_root,
+			     num_bytes + 2 * 1024 * 1024,
+			     get_alloc_profile(root, sinfo->flags), 0);
+
+	if (end_trans)
+		btrfs_end_transaction(trans, root);
+
+	return ret == 1 ? 1 : 0;
+}
+
 static int update_block_group(struct btrfs_trans_handle *trans,
 			      struct btrfs_root *root,
 			      u64 bytenr, u64 num_bytes, int alloc,
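
One design note on the replacement maybe_allocate_chunk() above: callers that
already run inside a transaction pass their handle in, while callers that do
not pass NULL, and the helper joins a transaction itself, remembering via
end_trans that it is then responsible for ending it. A small sketch of that
"optional handle" convention follows; the types and names are illustrative,
not the btrfs API.

#include <stdio.h>
#include <stdlib.h>

struct trans_handle { int id; };

static struct trans_handle *join_transaction(void)
{
	struct trans_handle *t = malloc(sizeof(*t));	/* sketch: no NULL check */

	t->id = 1;
	printf("joined transaction\n");
	return t;
}

static void end_transaction(struct trans_handle *t)
{
	printf("ended transaction\n");
	free(t);
}

static int do_work(struct trans_handle *trans)
{
	int end_trans = 0;

	if (!trans) {				/* caller had no transaction */
		trans = join_transaction();
		end_trans = 1;			/* we opened it, we close it */
	}
	printf("working in transaction %d\n", trans->id);
	if (end_trans)
		end_transaction(trans);
	return 0;
}

int main(void)
{
	do_work(NULL);	/* helper manages the transaction lifetime itself */
	return 0;
}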