for-5.7-rc1-tag

Merge tag 'for-5.7-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:
 "We have a few regressions and one fix for stable:

  - revert fsync optimization

  - fix lost i_size update

  - fix a space accounting leak

  - build fix, add back definition of a deprecated ioctl flag

  - fix search condition for old roots in relocation"

* tag 'for-5.7-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: re-instantiate the removed BTRFS_SUBVOL_CREATE_ASYNC definition
  btrfs: fix reclaim counter leak of space_info objects
  btrfs: make full fsyncs always operate on the entire file again
  btrfs: fix lost i_size update after cloning inline extent
  btrfs: check commit root generation in should_ignore_root
commit 6cc9306b8f
@@ -3370,6 +3370,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
                            space_info->bytes_reserved > 0 ||
                            space_info->bytes_may_use > 0))
                        btrfs_dump_space_info(info, space_info, 0, 0);
+               WARN_ON(space_info->reclaim_size > 0);
                list_del(&space_info->list);
                btrfs_sysfs_remove_space_info(space_info);
        }
@@ -2097,6 +2097,21 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        atomic_inc(&root->log_batch);
 
+       /*
+        * If the inode needs a full sync, make sure we use a full range to
+        * avoid log tree corruption, due to hole detection racing with ordered
+        * extent completion for adjacent ranges and races between logging and
+        * completion of ordered extents for adjacent ranges - both races
+        * could lead to file extent items in the log with overlapping ranges.
+        * Do this while holding the inode lock, to avoid races with other
+        * tasks.
+        */
+       if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+                    &BTRFS_I(inode)->runtime_flags)) {
+               start = 0;
+               end = LLONG_MAX;
+       }
+
        /*
         * Before we acquired the inode's lock, someone may have dirtied more
         * pages in the target range. We need to make sure that writeback for
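Note on how this path is reached: btrfs_sync_file() receives a byte range, and one common way user space hands it a sub-range is msync(MS_SYNC) on part of a shared mapping (a plain fsync() covers the whole file). The sketch below is illustrative only and not part of the patch; the mount point, file name and 4 KiB page-size assumption are placeholders. It merely issues the kind of ranged sync that the hunk above now widens to the entire file whenever BTRFS_INODE_NEEDS_FULL_SYNC is set.

/*
 * Illustrative user-space sketch (not part of the patch): msync(MS_SYNC) on
 * part of a shared mapping asks the kernel to sync only that sub-range of
 * the file.  With the hunk above, btrfs widens such a request to
 * [0, LLONG_MAX] whenever the inode is flagged for a full sync.
 * The path and sizes are arbitrary examples; a 4 KiB page size is assumed.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        const size_t len = 2 * 1024 * 1024;
        int fd = open("/mnt/btrfs/testfile", O_RDWR | O_CREAT, 0644);

        if (fd < 0 || ftruncate(fd, len) != 0) {
                perror("setup");
                return 1;
        }

        char *map = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (map == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        memset(map + 4096, 'a', 4096);          /* dirty a page-aligned sub-range */
        if (msync(map + 4096, 4096, MS_SYNC))   /* ranged writeback + sync */
                perror("msync");

        munmap(map, len);
        close(fd);
        return 0;
}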
@@ -264,6 +264,7 @@ copy_inline_extent:
                            size);
        inode_add_bytes(dst, datal);
        set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(dst)->runtime_flags);
+       ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
 out:
        if (!ret && !trans) {
                /*
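The sequence this fix covers can be driven from user space with the generic FICLONE ioctl: clone a file small enough for btrfs to store its data as an inline extent, then fsync the destination so its i_size has to be persisted through the log. The sketch below is illustrative, not part of the patch; the paths are placeholders, the inline-extent assumption depends on file size and mount options, and actually observing the original bug would also require a crash or power failure after the fsync.

/*
 * Illustrative user-space sketch (not part of the patch): clone a small file,
 * small enough that btrfs keeps its data as an inline extent, into another
 * file and fsync the destination.  This is the sequence whose persisted
 * i_size the hunk above fixes.  Paths are placeholders.
 */
#include <fcntl.h>
#include <linux/fs.h>           /* FICLONE */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        const char data[] = "hello inline extent\n";
        int src = open("/mnt/btrfs/small_src", O_RDWR | O_CREAT | O_TRUNC, 0644);
        int dst = open("/mnt/btrfs/clone_dst", O_RDWR | O_CREAT | O_TRUNC, 0644);

        if (src < 0 || dst < 0) {
                perror("open");
                return 1;
        }

        /* A few bytes only, so btrfs stores them inline by default. */
        if (write(src, data, sizeof(data) - 1) != (ssize_t)(sizeof(data) - 1)) {
                perror("write");
                return 1;
        }
        fsync(src);

        if (ioctl(dst, FICLONE, src)) {         /* reflink src into dst */
                perror("FICLONE");
                return 1;
        }
        if (fsync(dst))                         /* dst's i_size must survive log replay */
                perror("fsync");

        close(src);
        close(dst);
        return 0;
}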
@@ -611,8 +611,8 @@ static int should_ignore_root(struct btrfs_root *root)
        if (!reloc_root)
                return 0;
 
-       if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
-           root->fs_info->running_transaction->transid - 1)
+       if (btrfs_header_generation(reloc_root->commit_root) ==
+           root->fs_info->running_transaction->transid)
                return 0;
        /*
         * if there is reloc tree and it was created in previous
@@ -361,6 +361,16 @@ int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
        return 0;
 }
 
+static void remove_ticket(struct btrfs_space_info *space_info,
+                          struct reserve_ticket *ticket)
+{
+       if (!list_empty(&ticket->list)) {
+               list_del_init(&ticket->list);
+               ASSERT(space_info->reclaim_size >= ticket->bytes);
+               space_info->reclaim_size -= ticket->bytes;
+       }
+}
+
 /*
  * This is for space we already have accounted in space_info->bytes_may_use, so
  * basically when we're returning space from block_rsv's.
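The leak this patch addresses came from call sites that unlinked a ticket with list_del_init() but did not subtract its bytes from space_info->reclaim_size; remove_ticket() puts both steps in one helper so no caller can miss the accounting, as the follow-up hunks below show. The stand-alone C sketch that follows uses hypothetical names and no kernel dependencies; it only illustrates the same pattern of pairing list removal with counter updates in a single function.

/*
 * Stand-alone sketch of the pattern above, with hypothetical names (this is
 * not kernel code): every removal of a ticket must also subtract its bytes
 * from the pending-reclaim total, so both steps live in one helper instead
 * of being open-coded at each call site.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct ticket {
        size_t bytes;
        struct ticket *next;
};

struct pool {
        struct ticket *head;    /* singly linked list of waiting tickets */
        size_t reclaim_size;    /* sum of bytes of all queued tickets */
};

static void add_ticket(struct pool *p, struct ticket *t)
{
        t->next = p->head;
        p->head = t;
        p->reclaim_size += t->bytes;
}

/* The single place that unlinks a ticket and keeps the counter in sync. */
static void remove_ticket(struct pool *p, struct ticket *t)
{
        for (struct ticket **cur = &p->head; *cur; cur = &(*cur)->next) {
                if (*cur == t) {
                        *cur = t->next;
                        assert(p->reclaim_size >= t->bytes);
                        p->reclaim_size -= t->bytes;
                        return;
                }
        }
}

int main(void)
{
        struct pool p = { 0 };
        struct ticket a = { .bytes = 4096 };
        struct ticket b = { .bytes = 8192 };

        add_ticket(&p, &a);
        add_ticket(&p, &b);
        remove_ticket(&p, &a);          /* success path */
        remove_ticket(&p, &b);          /* error/interrupted path, same helper */
        printf("reclaim_size = %zu\n", p.reclaim_size);         /* prints 0 */
        return 0;
}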
@@ -388,9 +398,7 @@ again:
                btrfs_space_info_update_bytes_may_use(fs_info,
                                                      space_info,
                                                      ticket->bytes);
-               list_del_init(&ticket->list);
-               ASSERT(space_info->reclaim_size >= ticket->bytes);
-               space_info->reclaim_size -= ticket->bytes;
+               remove_ticket(space_info, ticket);
                ticket->bytes = 0;
                space_info->tickets_id++;
                wake_up(&ticket->wait);
@@ -899,7 +907,7 @@ static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
                btrfs_info(fs_info, "failing ticket with %llu bytes",
                           ticket->bytes);
 
-               list_del_init(&ticket->list);
+               remove_ticket(space_info, ticket);
                ticket->error = -ENOSPC;
                wake_up(&ticket->wait);
 
@@ -1063,7 +1071,7 @@ static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
                         * despite getting an error, resulting in a space leak
                         * (bytes_may_use counter of our space_info).
                         */
-                       list_del_init(&ticket->list);
+                       remove_ticket(space_info, ticket);
                        ticket->error = -EINTR;
                        break;
                }
@@ -1121,7 +1129,7 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
                 * either the async reclaim job deletes the ticket from the list
                 * or we delete it ourselves at wait_reserve_ticket().
                 */
-               list_del_init(&ticket->list);
+               remove_ticket(space_info, ticket);
                if (!ret)
                        ret = -ENOSPC;
        }
@@ -96,8 +96,8 @@ enum {
 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root, struct btrfs_inode *inode,
                           int inode_only,
-                          u64 start,
-                          u64 end,
+                          const loff_t start,
+                          const loff_t end,
                           struct btrfs_log_ctx *ctx);
 static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
@@ -4533,15 +4533,13 @@ static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
 static int btrfs_log_holes(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct btrfs_inode *inode,
-                          struct btrfs_path *path,
-                          const u64 start,
-                          const u64 end)
+                          struct btrfs_path *path)
 {
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_key key;
        const u64 ino = btrfs_ino(inode);
        const u64 i_size = i_size_read(&inode->vfs_inode);
-       u64 prev_extent_end = start;
+       u64 prev_extent_end = 0;
        int ret;
 
        if (!btrfs_fs_incompat(fs_info, NO_HOLES) || i_size == 0)
@@ -4549,21 +4547,14 @@ static int btrfs_log_holes(struct btrfs_trans_handle *trans,
 
        key.objectid = ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
-       key.offset = start;
+       key.offset = 0;
 
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                return ret;
 
-       if (ret > 0 && path->slots[0] > 0) {
-               btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
-               if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
-                       path->slots[0]--;
-       }
-
        while (true) {
                struct extent_buffer *leaf = path->nodes[0];
                u64 extent_end;
 
                if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
                        ret = btrfs_next_leaf(root, path);
@@ -4580,18 +4571,9 @@ static int btrfs_log_holes(struct btrfs_trans_handle *trans,
                if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
                        break;
 
-               extent_end = btrfs_file_extent_end(path);
-               if (extent_end <= start)
-                       goto next_slot;
-
                /* We have a hole, log it. */
                if (prev_extent_end < key.offset) {
-                       u64 hole_len;
-
-                       if (key.offset >= end)
-                               hole_len = end - prev_extent_end;
-                       else
-                               hole_len = key.offset - prev_extent_end;
+                       const u64 hole_len = key.offset - prev_extent_end;
 
                        /*
                         * Release the path to avoid deadlocks with other code
@@ -4621,20 +4603,16 @@ static int btrfs_log_holes(struct btrfs_trans_handle *trans,
                        leaf = path->nodes[0];
                }
 
-               prev_extent_end = min(extent_end, end);
-               if (extent_end >= end)
-                       break;
-next_slot:
+               prev_extent_end = btrfs_file_extent_end(path);
                path->slots[0]++;
                cond_resched();
        }
 
-       if (prev_extent_end < end && prev_extent_end < i_size) {
+       if (prev_extent_end < i_size) {
                u64 hole_len;
 
                btrfs_release_path(path);
-               hole_len = min(ALIGN(i_size, fs_info->sectorsize), end);
-               hole_len -= prev_extent_end;
+               hole_len = ALIGN(i_size - prev_extent_end, fs_info->sectorsize);
                ret = btrfs_insert_file_extent(trans, root->log_root,
                                               ino, prev_extent_end, 0, 0,
                                               hole_len, 0, hole_len,
@@ -4971,8 +4949,6 @@ static int copy_inode_items_to_log(struct btrfs_trans_handle *trans,
                               const u64 logged_isize,
                               const bool recursive_logging,
                               const int inode_only,
-                              const u64 start,
-                              const u64 end,
                               struct btrfs_log_ctx *ctx,
                               bool *need_log_inode_item)
 {
@@ -4981,21 +4957,6 @@ static int copy_inode_items_to_log(struct btrfs_trans_handle *trans,
        int ins_nr = 0;
        int ret;
 
-       /*
-        * We must make sure we don't copy extent items that are entirely out of
-        * the range [start, end - 1]. This is not just an optimization to avoid
-        * copying but also needed to avoid a corruption where we end up with
-        * file extent items in the log tree that have overlapping ranges - this
-        * can happen if we race with ordered extent completion for ranges that
-        * are outside our target range. For example we copy an extent item and
-        * when we move to the next leaf, that extent was trimmed and a new one
-        * covering a subrange of it, but with a higher key, was inserted - we
-        * would then copy this other extent too, resulting in a log tree with
-        * 2 extent items that represent overlapping ranges.
-        *
-        * We can copy the entire extents at the range bondaries however, even
-        * if they cover an area outside the target range. That's ok.
-        */
        while (1) {
                ret = btrfs_search_forward(root, min_key, path, trans->transid);
                if (ret < 0)
@@ -5063,29 +5024,6 @@ again:
                        goto next_slot;
                }
 
-               if (min_key->type == BTRFS_EXTENT_DATA_KEY) {
-                       const u64 extent_end = btrfs_file_extent_end(path);
-
-                       if (extent_end <= start) {
-                               if (ins_nr > 0) {
-                                       ret = copy_items(trans, inode, dst_path,
-                                                        path, ins_start_slot,
-                                                        ins_nr, inode_only,
-                                                        logged_isize);
-                                       if (ret < 0)
-                                               return ret;
-                                       ins_nr = 0;
-                               }
-                               goto next_slot;
-                       }
-                       if (extent_end >= end) {
-                               ins_nr++;
-                               if (ins_nr == 1)
-                                       ins_start_slot = path->slots[0];
-                               break;
-                       }
-               }
-
                if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
                        ins_nr++;
                        goto next_slot;
@@ -5151,8 +5089,8 @@ next_key:
 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root, struct btrfs_inode *inode,
                           int inode_only,
-                          u64 start,
-                          u64 end,
+                          const loff_t start,
+                          const loff_t end,
                           struct btrfs_log_ctx *ctx)
 {
        struct btrfs_fs_info *fs_info = root->fs_info;
@@ -5180,9 +5118,6 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                return -ENOMEM;
        }
 
-       start = ALIGN_DOWN(start, fs_info->sectorsize);
-       end = ALIGN(end, fs_info->sectorsize);
-
        min_key.objectid = ino;
        min_key.type = BTRFS_INODE_ITEM_KEY;
        min_key.offset = 0;
@@ -5298,8 +5233,8 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 
        err = copy_inode_items_to_log(trans, inode, &min_key, &max_key,
                                      path, dst_path, logged_isize,
-                                     recursive_logging, inode_only,
-                                     start, end, ctx, &need_log_inode_item);
+                                     recursive_logging, inode_only, ctx,
+                                     &need_log_inode_item);
        if (err)
                goto out_unlock;
 
@@ -5312,7 +5247,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
        if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
                btrfs_release_path(path);
                btrfs_release_path(dst_path);
-               err = btrfs_log_holes(trans, root, inode, path, start, end);
+               err = btrfs_log_holes(trans, root, inode, path);
                if (err)
                        goto out_unlock;
        }
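With the ranged arguments reverted, btrfs_log_holes() once again scans and logs holes across the whole file instead of a sub-range. A user-space workload that exercises this path is fsyncing a sparse file on a filesystem formatted with the no-holes feature; the sketch below is illustrative only, with an arbitrary path and offsets, and is not part of the patch.

/*
 * Illustrative user-space sketch (not part of the patch): create a sparse
 * file and fsync it.  On a filesystem formatted with the no-holes feature
 * this kind of workload goes through btrfs_log_holes(), which after the
 * revert above always considers the whole file.  Path and offsets are
 * arbitrary examples.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/btrfs/sparse", O_RDWR | O_CREAT | O_TRUNC, 0644);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* Data at the start, a hole in between, data again near 1 MiB. */
        if (pwrite(fd, "head", 4, 0) != 4 ||
            pwrite(fd, "tail", 4, 1024 * 1024) != 4) {
                perror("pwrite");
                return 1;
        }

        if (fsync(fd))          /* logs the file, holes included */
                perror("fsync");

        close(fd);
        return 0;
}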
@@ -36,12 +36,10 @@ struct btrfs_ioctl_vol_args {
 #define BTRFS_DEVICE_PATH_NAME_MAX     1024
 #define BTRFS_SUBVOL_NAME_MAX          4039
 
-/*
- * Deprecated since 5.7:
- *
- * BTRFS_SUBVOL_CREATE_ASYNC   (1ULL << 0)
- */
-
+#ifndef __KERNEL__
+/* Deprecated since 5.7 */
+# define BTRFS_SUBVOL_CREATE_ASYNC     (1ULL << 0)
+#endif
 #define BTRFS_SUBVOL_RDONLY            (1ULL << 1)
 #define BTRFS_SUBVOL_QGROUP_INHERIT    (1ULL << 2)
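Restoring the definition under #ifndef __KERNEL__ keeps user-space sources that still reference BTRFS_SUBVOL_CREATE_ASYNC compiling against the UAPI header while hiding the flag from kernel code. The sketch below shows the kind of program that depends on it: it masks the deprecated flag out of a caller-supplied flags word before issuing BTRFS_IOC_SNAP_CREATE_V2. The mount point, subvolume path and error handling are illustrative assumptions, not taken from the patch.

/*
 * Illustrative user-space sketch (not part of the patch): a pre-5.7 program
 * can keep naming BTRFS_SUBVOL_CREATE_ASYNC when built against this header,
 * here only to mask the no-longer-supported flag out before creating a
 * read-only snapshot.  Mount point and subvolume path are placeholders.
 */
#include <fcntl.h>
#include <linux/btrfs.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int create_snapshot(int dst_dir_fd, int src_subvol_fd,
                           const char *name, __u64 flags)
{
        struct btrfs_ioctl_vol_args_v2 args;

        memset(&args, 0, sizeof(args));
        args.fd = src_subvol_fd;
        /* Async snapshot creation is deprecated since 5.7; drop the flag. */
        args.flags = flags & ~BTRFS_SUBVOL_CREATE_ASYNC;
        strncpy(args.name, name, BTRFS_SUBVOL_NAME_MAX);

        return ioctl(dst_dir_fd, BTRFS_IOC_SNAP_CREATE_V2, &args);
}

int main(void)
{
        int dst = open("/mnt/btrfs", O_RDONLY | O_DIRECTORY);  /* parent dir */
        int src = open("/mnt/btrfs/subvol", O_RDONLY);          /* source subvolume */

        if (dst < 0 || src < 0) {
                perror("open");
                return 1;
        }
        if (create_snapshot(dst, src, "snap-1",
                            BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY))
                perror("BTRFS_IOC_SNAP_CREATE_V2");

        close(src);
        close(dst);
        return 0;
}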