btrfs: Convert fs_info->free_chunk_space to atomic64_t

The ->free_chunk_space variable is used to track unallocated space and
access to it is protected by a spinlock, which is not used for anything
else.  Make the code a bit more self-explanatory by switching the
variable to an atomic64_t type and killing the spinlock.

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
[ not performance-critical code, use of an atomic type is ok ]
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Author:    Nikolay Borisov <nborisov@suse.com>, 2017-05-11 09:17:46 +03:00
Committed: David Sterba <dsterba@suse.com>
parent 401b41e5a8
commit a5ed45f822
4 changed files with 10 additions and 26 deletions
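The conversion follows a familiar pattern: a 64-bit counter whose only requirement is that individual reads and updates are atomic does not need a dedicated spinlock.  Below is a minimal standalone sketch of the same before/after shape, using C11 <stdatomic.h> in place of the kernel's spinlock_t/atomic64_t APIs; the struct and names are illustrative only and do not appear in the patch.

/*
 * Sketch only: a lock-protected plain counter replaced by an atomic one.
 * The comments map each call to the kernel helper used in the hunks below.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct fs_counters {
        /* before: spinlock_t free_chunk_lock; u64 free_chunk_space; */
        /* after:  a single lock-free 64-bit counter                 */
        atomic_uint_fast64_t free_chunk_space;
};

int main(void)
{
        struct fs_counters fc;

        atomic_init(&fc.free_chunk_space, 0);                /* atomic64_set()  */
        atomic_fetch_add(&fc.free_chunk_space, 1ULL << 30);  /* atomic64_add()  */
        atomic_fetch_sub(&fc.free_chunk_space, 1ULL << 20);  /* atomic64_sub()  */
        printf("unallocated: %llu bytes\n",                  /* atomic64_read() */
               (unsigned long long)atomic_load(&fc.free_chunk_space));
        return 0;
}

Note that the chunk_mutex critical sections visible in the hunks below are untouched; only the dedicated free_chunk_lock goes away.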

fs/btrfs/ctree.h

@@ -748,8 +748,7 @@ struct btrfs_fs_info {
         struct rb_root block_group_cache_tree;
 
         /* keep track of unallocated space */
-        spinlock_t free_chunk_lock;
-        u64 free_chunk_space;
+        atomic64_t free_chunk_space;
 
         struct extent_io_tree freed_extents[2];
         struct extent_io_tree *pinned_extents;

fs/btrfs/disk-io.c

@@ -2626,7 +2626,6 @@ int open_ctree(struct super_block *sb,
         spin_lock_init(&fs_info->fs_roots_radix_lock);
         spin_lock_init(&fs_info->delayed_iput_lock);
         spin_lock_init(&fs_info->defrag_inodes_lock);
-        spin_lock_init(&fs_info->free_chunk_lock);
         spin_lock_init(&fs_info->tree_mod_seq_lock);
         spin_lock_init(&fs_info->super_lock);
         spin_lock_init(&fs_info->qgroup_op_lock);
@@ -2667,7 +2666,7 @@ int open_ctree(struct super_block *sb,
         fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
         fs_info->metadata_ratio = 0;
         fs_info->defrag_inodes = RB_ROOT;
-        fs_info->free_chunk_space = 0;
+        atomic64_set(&fs_info->free_chunk_space, 0);
         fs_info->tree_mod_log = RB_ROOT;
         fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
         fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */

fs/btrfs/extent-tree.c

@@ -4646,9 +4646,7 @@ static int can_overcommit(struct btrfs_root *root,
 
         used += space_info->bytes_may_use;
 
-        spin_lock(&fs_info->free_chunk_lock);
-        avail = fs_info->free_chunk_space;
-        spin_unlock(&fs_info->free_chunk_lock);
+        avail = atomic64_read(&fs_info->free_chunk_space);
 
         /*
          * If we have dup, raid1 or raid10 then only half of the free
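The comment cut off above goes on to say that with dup, raid1 or raid10 only half of the free space is actually usable, and can_overcommit halves avail for those profiles.  A standalone sketch of that adjustment, with stand-in values for the BTRFS_BLOCK_GROUP_* flag bits (this is an illustration, not part of the patch):

/* Sketch of the overcommit halving referred to by the comment above. */
#include <stdint.h>
#include <stdio.h>

#define BG_DUP    (1ULL << 0)   /* stand-in for BTRFS_BLOCK_GROUP_DUP    */
#define BG_RAID1  (1ULL << 1)   /* stand-in for BTRFS_BLOCK_GROUP_RAID1  */
#define BG_RAID10 (1ULL << 2)   /* stand-in for BTRFS_BLOCK_GROUP_RAID10 */

static uint64_t usable_avail(uint64_t avail, uint64_t profile)
{
        /* Duplicated/mirrored profiles store every byte twice. */
        if (profile & (BG_DUP | BG_RAID1 | BG_RAID10))
                avail >>= 1;
        return avail;
}

int main(void)
{
        /* 10 GiB unallocated under raid1 -> 5 GiB usable for new data */
        printf("%llu\n", (unsigned long long)usable_avail(10ULL << 30, BG_RAID1));
        return 0;
}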

fs/btrfs/volumes.c

@@ -2417,9 +2417,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
         fs_info->fs_devices->total_devices++;
         fs_info->fs_devices->total_rw_bytes += device->total_bytes;
 
-        spin_lock(&fs_info->free_chunk_lock);
-        fs_info->free_chunk_space += device->total_bytes;
-        spin_unlock(&fs_info->free_chunk_lock);
+        atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
 
         if (!blk_queue_nonrot(q))
                 fs_info->fs_devices->rotating = 1;
@@ -2874,9 +2872,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
                         mutex_lock(&fs_info->chunk_mutex);
                         btrfs_device_set_bytes_used(device,
                                         device->bytes_used - dev_extent_len);
-                        spin_lock(&fs_info->free_chunk_lock);
-                        fs_info->free_chunk_space += dev_extent_len;
-                        spin_unlock(&fs_info->free_chunk_lock);
+                        atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
                         btrfs_clear_space_info_full(fs_info);
                         mutex_unlock(&fs_info->chunk_mutex);
                 }
@@ -4409,9 +4405,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
         btrfs_device_set_total_bytes(device, new_size);
         if (device->writeable) {
                 device->fs_devices->total_rw_bytes -= diff;
-                spin_lock(&fs_info->free_chunk_lock);
-                fs_info->free_chunk_space -= diff;
-                spin_unlock(&fs_info->free_chunk_lock);
+                atomic64_sub(diff, &fs_info->free_chunk_space);
         }
         mutex_unlock(&fs_info->chunk_mutex);
 
@@ -4535,9 +4529,7 @@ done:
                 btrfs_device_set_total_bytes(device, old_size);
                 if (device->writeable)
                         device->fs_devices->total_rw_bytes += diff;
-                spin_lock(&fs_info->free_chunk_lock);
-                fs_info->free_chunk_space += diff;
-                spin_unlock(&fs_info->free_chunk_lock);
+                atomic64_add(diff, &fs_info->free_chunk_space);
                 mutex_unlock(&fs_info->chunk_mutex);
         }
         return ret;
@@ -4882,9 +4874,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                 btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
         }
 
-        spin_lock(&info->free_chunk_lock);
-        info->free_chunk_space -= (stripe_size * map->num_stripes);
-        spin_unlock(&info->free_chunk_lock);
+        atomic64_sub(stripe_size * map->num_stripes, &info->free_chunk_space);
 
         free_extent_map(em);
         check_raid56_incompat_flag(info, type);
@@ -6684,10 +6674,8 @@ static int read_one_dev(struct btrfs_fs_info *fs_info,
         device->in_fs_metadata = 1;
         if (device->writeable && !device->is_tgtdev_for_dev_replace) {
                 device->fs_devices->total_rw_bytes += device->total_bytes;
-                spin_lock(&fs_info->free_chunk_lock);
-                fs_info->free_chunk_space += device->total_bytes -
-                        device->bytes_used;
-                spin_unlock(&fs_info->free_chunk_lock);
+                atomic64_add(device->total_bytes - device->bytes_used,
+                                &fs_info->free_chunk_space);
         }
         ret = 0;
         return ret;
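Taken together, the call sites in this file maintain one invariant: free_chunk_space is, in essence, the sum over writable devices of total_bytes - bytes_used.  read_one_dev re-derives the per-device contribution at mount time, while the other hunks adjust the counter incrementally as chunks are allocated or removed and as devices are added or resized.  A small standalone illustration of that identity (struct and field names are stand-ins, not the kernel's definitions):

/*
 * Illustration of the accounting identity the patch preserves:
 *   free_chunk_space == sum over writable devices of (total_bytes - bytes_used)
 */
#include <stdint.h>
#include <stdio.h>

struct dev_info {
        uint64_t total_bytes;   /* size of the device                */
        uint64_t bytes_used;    /* space already allocated to chunks */
        int writeable;
};

static uint64_t sum_unallocated(const struct dev_info *devs, int ndevs)
{
        uint64_t free_chunk_space = 0;

        for (int i = 0; i < ndevs; i++)
                if (devs[i].writeable)
                        free_chunk_space += devs[i].total_bytes - devs[i].bytes_used;
        return free_chunk_space;
}

int main(void)
{
        struct dev_info devs[] = {
                { 100ULL << 30, 40ULL << 30, 1 },  /* 100 GiB, 40 GiB used */
                {  50ULL << 30, 10ULL << 30, 1 },  /*  50 GiB, 10 GiB used */
        };

        /* 60 GiB + 40 GiB unallocated = 100 GiB */
        printf("%llu GiB\n", (unsigned long long)(sum_unallocated(devs, 2) >> 30));
        return 0;
}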