Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  Btrfs: fix meta data raid-repair merge problem
  Btrfs: skip allocation attempt from empty cluster
  Btrfs: skip block groups without enough space for a cluster
  Btrfs: start search for new cluster at the beginning
  Btrfs: reset cluster's max_size when creating bitmap
  Btrfs: initialize new bitmaps' list
  Btrfs: fix oops when calling statfs on readonly device
  Btrfs: Don't error on resizing FS to same size
  Btrfs: fix deadlock on metadata reservation when evicting a inode
  Fix URL of btrfs-progs git repository in docs
  btrfs scrub: handle -ENOMEM from init_ipath()
commit b930c26416
Documentation/filesystems/btrfs.txt
@@ -63,8 +63,8 @@ IRC network.
 Userspace tools for creating and manipulating Btrfs file systems are
 available from the git repository at the following location:
 
- http://git.kernel.org/?p=linux/kernel/git/mason/btrfs-progs-unstable.git
- git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs-unstable.git
+ http://git.kernel.org/?p=linux/kernel/git/mason/btrfs-progs.git
+ git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs.git
 
 These include the following tools:
 
fs/btrfs/ctree.h
@@ -2369,6 +2369,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
 int btrfs_block_rsv_refill(struct btrfs_root *root,
                            struct btrfs_block_rsv *block_rsv,
                            u64 min_reserved);
+int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
+                                   struct btrfs_block_rsv *block_rsv,
+                                   u64 min_reserved);
 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
                             struct btrfs_block_rsv *dst_rsv,
                             u64 num_bytes);
fs/btrfs/extent-tree.c
@@ -3888,9 +3888,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
         return ret;
 }
 
-int btrfs_block_rsv_refill(struct btrfs_root *root,
-                           struct btrfs_block_rsv *block_rsv,
-                           u64 min_reserved)
+static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
+                                           struct btrfs_block_rsv *block_rsv,
+                                           u64 min_reserved, int flush)
 {
         u64 num_bytes = 0;
         int ret = -ENOSPC;
@@ -3909,7 +3909,7 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
         if (!ret)
                 return 0;
 
-        ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1);
+        ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
         if (!ret) {
                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
                 return 0;
@@ -3918,6 +3918,20 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
         return ret;
 }
 
+int btrfs_block_rsv_refill(struct btrfs_root *root,
+                           struct btrfs_block_rsv *block_rsv,
+                           u64 min_reserved)
+{
+        return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
+}
+
+int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
+                                   struct btrfs_block_rsv *block_rsv,
+                                   u64 min_reserved)
+{
+        return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
+}
+
 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
                             struct btrfs_block_rsv *dst_rsv,
                             u64 num_bytes)
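The hunks above fold the body of btrfs_block_rsv_refill() into a shared __btrfs_block_rsv_refill() that takes a flush flag, then reintroduce btrfs_block_rsv_refill() and btrfs_block_rsv_refill_noflush() as thin wrappers around it. As a rough illustration of that "one worker, two entry points" shape, here is a minimal standalone C sketch; toy_rsv, toy_refill() and friends are invented names, not kernel code.

#include <stdio.h>

/* Toy stand-in for a block reservation; purely illustrative. */
struct toy_rsv { long reserved; };

/* One worker carries the real logic and takes the behaviour flag. */
static int toy_refill(struct toy_rsv *rsv, long min_reserved, int flush)
{
        long missing = min_reserved - rsv->reserved;

        if (missing <= 0)
                return 0;                  /* already full enough */
        if (!flush)
                return -1;                 /* caller refused to wait/flush */
        rsv->reserved += missing;          /* pretend flushing freed space */
        return 0;
}

/* Thin wrappers fix the flag, mirroring refill() vs refill_noflush(). */
static int toy_refill_flush(struct toy_rsv *rsv, long min)
{
        return toy_refill(rsv, min, 1);
}

static int toy_refill_noflush(struct toy_rsv *rsv, long min)
{
        return toy_refill(rsv, min, 0);
}

int main(void)
{
        struct toy_rsv rsv = { .reserved = 10 };

        printf("noflush: %d\n", toy_refill_noflush(&rsv, 50)); /* fails fast */
        printf("flush:   %d\n", toy_refill_flush(&rsv, 50));   /* may flush */
        return 0;
}

A later hunk switches btrfs_evict_inode() to the noflush variant, which, per the commit subject, avoids a deadlock on metadata reservation by not flushing while evicting.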
@@ -5265,7 +5279,7 @@ alloc:
                 spin_lock(&block_group->free_space_ctl->tree_lock);
                 if (cached &&
                     block_group->free_space_ctl->free_space <
-                    num_bytes + empty_size) {
+                    num_bytes + empty_cluster + empty_size) {
                         spin_unlock(&block_group->free_space_ctl->tree_lock);
                         goto loop;
                 }
@@ -5286,12 +5300,10 @@ alloc:
                  * people trying to start a new cluster
                  */
                 spin_lock(&last_ptr->refill_lock);
-                if (last_ptr->block_group &&
-                    (last_ptr->block_group->ro ||
-                    !block_group_bits(last_ptr->block_group, data))) {
-                        offset = 0;
+                if (!last_ptr->block_group ||
+                    last_ptr->block_group->ro ||
+                    !block_group_bits(last_ptr->block_group, data))
                         goto refill_cluster;
-                }
 
                 offset = btrfs_alloc_from_cluster(block_group, last_ptr,
                                          num_bytes, search_start);
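The two cluster hunks above make the free-space check account for empty_cluster and let the allocator jump straight to refill_cluster when the cached cluster is missing, read-only, or of the wrong type, instead of attempting an allocation that cannot succeed. A hypothetical userspace sketch of that short-circuit follows; toy_cluster and its helpers are invented, not the real allocator.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified model of a cached allocation cluster. */
struct toy_cluster { bool ro; bool right_type; long free; };

static long alloc_from_cluster(struct toy_cluster *c, long bytes)
{
        if (c->free < bytes)
                return 0;          /* 0 means "no offset found" here */
        c->free -= bytes;
        return 4096;               /* fake offset */
}

static long find_space(struct toy_cluster *last, long bytes)
{
        long offset;

        /* Skip the allocation attempt entirely when the cached cluster
         * is absent, read-only or of the wrong type, mirroring the
         * "goto refill_cluster" short-circuit in the diff above. */
        if (!last || last->ro || !last->right_type)
                goto refill_cluster;

        offset = alloc_from_cluster(last, bytes);
        if (offset)
                return offset;

refill_cluster:
        /* Here the real code sets up a brand-new cluster; the sketch
         * just reports that the slow path was taken. */
        printf("refilling cluster\n");
        return 0;
}

int main(void)
{
        struct toy_cluster c = { .ro = false, .right_type = true, .free = 8192 };

        printf("offset: %ld\n", find_space(&c, 4096));
        printf("offset: %ld\n", find_space(NULL, 4096));
        return 0;
}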
@@ -5342,7 +5354,7 @@ refill_cluster:
                         /* allocate a cluster in this block group */
                         ret = btrfs_find_space_cluster(trans, root,
                                                block_group, last_ptr,
-                                               offset, num_bytes,
+                                               search_start, num_bytes,
                                                empty_cluster + empty_size);
                         if (ret == 0) {
                                 /*
fs/btrfs/extent_io.c
@@ -2287,14 +2287,20 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                 if (!uptodate) {
                         int failed_mirror;
                         failed_mirror = (int)(unsigned long)bio->bi_bdev;
-                        if (tree->ops && tree->ops->readpage_io_failed_hook)
-                                ret = tree->ops->readpage_io_failed_hook(
-                                                bio, page, start, end,
-                                                failed_mirror, state);
-                        else
-                                ret = bio_readpage_error(bio, page, start, end,
-                                                         failed_mirror, NULL);
+                        /*
+                         * The generic bio_readpage_error handles errors the
+                         * following way: If possible, new read requests are
+                         * created and submitted and will end up in
+                         * end_bio_extent_readpage as well (if we're lucky, not
+                         * in the !uptodate case). In that case it returns 0 and
+                         * we just go on with the next page in our bio. If it
+                         * can't handle the error it will return -EIO and we
+                         * remain responsible for that page.
+                         */
+                        ret = bio_readpage_error(bio, page, start, end,
+                                                 failed_mirror, NULL);
                         if (ret == 0) {
+error_handled:
                                 uptodate =
                                         test_bit(BIO_UPTODATE, &bio->bi_flags);
                                 if (err)
@@ -2302,6 +2308,13 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                                 uncache_state(&cached);
                                 continue;
                         }
+                        if (tree->ops && tree->ops->readpage_io_failed_hook) {
+                                ret = tree->ops->readpage_io_failed_hook(
+                                                bio, page, start, end,
+                                                failed_mirror, state);
+                                if (ret == 0)
+                                        goto error_handled;
+                        }
                 }
 
                 if (uptodate) {
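Together, the two extent_io.c hunks above reorder the read-error path: the generic bio_readpage_error() repair runs first, and the filesystem's readpage_io_failed_hook only afterwards, rejoining the common path through the new error_handled label. A minimal sketch of that control flow, with made-up generic_repair()/fs_failed_hook() stand-ins (-5 standing in for -EIO), not the kernel functions:

#include <stdio.h>

/* Pretend recovery helpers; return 0 when the error was handled. */
static int generic_repair(int page) { return page % 2 ? 0 : -5; }
static int fs_failed_hook(int page) { return page % 3 ? -5 : 0; }

static void end_read(int page, int uptodate)
{
        int ret;

        if (!uptodate) {
                /* Try the generic repair first; if it resubmitted the read
                 * successfully we are done with this page. */
                ret = generic_repair(page);
                if (ret == 0) {
error_handled:
                        printf("page %d: handled, continue\n", page);
                        return;
                }
                /* Otherwise give the filesystem-specific hook a chance and
                 * jump back into the handled path if it succeeds. */
                ret = fs_failed_hook(page);
                if (ret == 0)
                        goto error_handled;
                printf("page %d: unrecoverable\n", page);
                return;
        }
        printf("page %d: ok\n", page);
}

int main(void)
{
        for (int page = 0; page < 4; page++)
                end_read(page, page == 0);
        return 0;
}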
fs/btrfs/free-space-cache.c
@@ -1470,6 +1470,7 @@ static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
 {
         info->offset = offset_to_bitmap(ctl, offset);
         info->bytes = 0;
+        INIT_LIST_HEAD(&info->list);
         link_free_space(ctl, info);
         ctl->total_bitmaps++;
 
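The one-line hunk above makes add_new_bitmap() initialize info->list, presumably so that later list operations on the bitmap entry see a valid, self-pointing empty list rather than uninitialized memory. A tiny standalone illustration with a hand-rolled node type (not the kernel's list_head implementation):

#include <stdio.h>

/* Minimal circular doubly-linked list node, modelled on list_head. */
struct node { struct node *next, *prev; };

static void node_init(struct node *n)       { n->next = n; n->prev = n; }

static int node_empty(const struct node *n) { return n->next == n; }

/* Unlinking touches n->prev->next and n->next->prev, so both pointers
 * must be valid even if the node was never added to any list. */
static void node_del(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        node_init(n);
}

int main(void)
{
        struct node a;

        node_init(&a);          /* without this, node_del(&a) would follow
                                 * whatever garbage the memory happened to hold */
        node_del(&a);
        printf("empty: %d\n", node_empty(&a));
        return 0;
}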
@@ -2319,6 +2320,7 @@ again:
 
                 if (!found) {
                         start = i;
+                        cluster->max_size = 0;
                         found = true;
                 }
 
fs/btrfs/inode.c
@@ -3490,7 +3490,7 @@ void btrfs_evict_inode(struct inode *inode)
          * doing the truncate.
          */
         while (1) {
-                ret = btrfs_block_rsv_refill(root, rsv, min_size);
+                ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
 
                 /*
                  * Try and steal from the global reserve since we will
fs/btrfs/ioctl.c
@@ -1278,7 +1278,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
                 }
                 ret = btrfs_grow_device(trans, device, new_size);
                 btrfs_commit_transaction(trans, root);
-        } else {
+        } else if (new_size < old_size) {
                 ret = btrfs_shrink_device(device, new_size);
         }
 
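The resize hunk above replaces the bare else with else if (new_size < old_size), so a resize to the current size falls through both branches and succeeds as a no-op instead of being treated as a shrink. A sketch of the resulting three-way decision; the grow condition is assumed to be new_size > old_size in the unchanged code just above the hunk:

#include <stdio.h>

/* Hypothetical helper mirroring the grow / shrink / no-op decision. */
static const char *resize_action(long long old_size, long long new_size)
{
        if (new_size > old_size)
                return "grow";
        else if (new_size < old_size)
                return "shrink";
        /* new_size == old_size: nothing to do, and no error either */
        return "no-op";
}

int main(void)
{
        printf("%s\n", resize_action(100, 150));  /* grow */
        printf("%s\n", resize_action(100, 50));   /* shrink */
        printf("%s\n", resize_action(100, 100));  /* no-op */
        return 0;
}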
fs/btrfs/scrub.c
@@ -256,6 +256,11 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
         btrfs_release_path(swarn->path);
 
         ipath = init_ipath(4096, local_root, swarn->path);
+        if (IS_ERR(ipath)) {
+                ret = PTR_ERR(ipath);
+                ipath = NULL;
+                goto err;
+        }
         ret = paths_from_inode(inum, ipath);
 
         if (ret < 0)
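The scrub hunk above checks the return value of init_ipath() with IS_ERR()/PTR_ERR(), the kernel convention of packing a small negative errno into an invalid pointer value, so an -ENOMEM is propagated instead of the error pointer being used. Below is a userspace imitation of that convention, just to show the shape; err_ptr()/is_err()/ptr_err() and toy_init_ipath() are stand-ins, not the kernel macros.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace imitation of the kernel's ERR_PTR/IS_ERR/PTR_ERR idiom:
 * a small negative errno lives in the last, never-valid page of
 * pointer space, so it can be told apart from a real allocation. */
static void *err_ptr(intptr_t err)     { return (void *)err; }
static int is_err(const void *p)       { return (uintptr_t)p >= (uintptr_t)-4095; }
static intptr_t ptr_err(const void *p) { return (intptr_t)p; }

/* Stand-in for init_ipath(): returns a buffer, or an encoded -ENOMEM. */
static void *toy_init_ipath(size_t total)
{
        void *buf = total > (1u << 20) ? NULL : malloc(total);

        if (!buf)
                return err_ptr(-ENOMEM);
        return buf;
}

int main(void)
{
        void *ipath = toy_init_ipath((size_t)1 << 30); /* force the failure path */
        long ret = 0;

        if (is_err(ipath)) {
                ret = (long)ptr_err(ipath);    /* propagate -ENOMEM upwards */
                ipath = NULL;                  /* never dereference an error pointer */
        }
        printf("ret = %ld\n", ret);
        free(ipath);                           /* free(NULL) is a no-op */
        return 0;
}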
fs/btrfs/super.c
@@ -1057,7 +1057,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
         int i = 0, nr_devices;
         int ret;
 
-        nr_devices = fs_info->fs_devices->rw_devices;
+        nr_devices = fs_info->fs_devices->open_devices;
         BUG_ON(!nr_devices);
 
         devices_info = kmalloc(sizeof(*devices_info) * nr_devices,
@@ -1079,8 +1079,8 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
         else
                 min_stripe_size = BTRFS_STRIPE_LEN;
 
-        list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
-                if (!device->in_fs_metadata)
+        list_for_each_entry(device, &fs_devices->devices, dev_list) {
+                if (!device->in_fs_metadata || !device->bdev)
                         continue;
 
                 avail_space = device->total_bytes - device->bytes_used;
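The two super.c hunks above make btrfs_calc_avail_data_space() size its work by open_devices and walk the full device list while skipping entries without a backing bdev; presumably a read-only mount has rw_devices == 0, which the old code could not cope with (note the BUG_ON in the context). A simplified sketch of the "count open, walk all, skip unusable" shape with invented structures, not the kernel's device lists:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for a btrfs device entry. */
struct toy_device {
        bool in_fs_metadata;
        bool has_bdev;       /* stands in for device->bdev != NULL */
        long long total_bytes, bytes_used;
};

/* Sum free space over every device that is actually usable. */
static long long calc_avail(const struct toy_device *devs, int nr_total, int nr_open)
{
        long long avail = 0;

        /* No opened devices (e.g. nothing mounted read-write yet):
         * report zero instead of tripping an assertion. */
        if (nr_open == 0)
                return 0;

        /* Walk every device in the list, skipping entries that are not
         * part of the metadata or have no backing block device. */
        for (int i = 0; i < nr_total; i++) {
                if (!devs[i].in_fs_metadata || !devs[i].has_bdev)
                        continue;
                avail += devs[i].total_bytes - devs[i].bytes_used;
        }
        return avail;
}

int main(void)
{
        struct toy_device devs[] = {
                { true, true,  1000, 300 },
                { true, false, 1000, 0   },  /* present in the list, no bdev */
        };

        printf("avail = %lld\n", calc_avail(devs, 2, 1));
        return 0;
}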