Mirror of https://github.com/torvalds/linux.git (synced 2024-11-22 20:22:09 +00:00)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs fixes from Chris Mason:
 "These are assorted fixes, mostly from Josef nailing down xfstests
  runs. Zach also has a long standing fix for problems with readdir
  wrapping f_pos (or ctx->pos).

  These patches were spread out over different bases, so I rebased
  things on top of rc4 and retested overnight"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  btrfs: don't loop on large offsets in readdir
  Btrfs: check to see if root_list is empty before adding it to dead roots
  Btrfs: release both paths before logging dir/changed extents
  Btrfs: allow splitting of hole em's when dropping extent cache
  Btrfs: make sure the backref walker catches all refs to our extent
  Btrfs: fix backref walking when we hit a compressed extent
  Btrfs: do not offset physical if we're compressed
  Btrfs: fix extent buffer leak after backref walking
  Btrfs: fix a bug of snapshot-aware defrag to make it work on partial extents
  btrfs: fix file truncation if FALLOC_FL_KEEP_SIZE is specified
commit d92581fcad
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -36,16 +36,23 @@ static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
 			      u64 extent_item_pos,
 			      struct extent_inode_elem **eie)
 {
-	u64 data_offset;
-	u64 data_len;
+	u64 offset = 0;
 	struct extent_inode_elem *e;
 
-	data_offset = btrfs_file_extent_offset(eb, fi);
-	data_len = btrfs_file_extent_num_bytes(eb, fi);
+	if (!btrfs_file_extent_compression(eb, fi) &&
+	    !btrfs_file_extent_encryption(eb, fi) &&
+	    !btrfs_file_extent_other_encoding(eb, fi)) {
+		u64 data_offset;
+		u64 data_len;
 
-	if (extent_item_pos < data_offset ||
-	    extent_item_pos >= data_offset + data_len)
-		return 1;
+		data_offset = btrfs_file_extent_offset(eb, fi);
+		data_len = btrfs_file_extent_num_bytes(eb, fi);
+
+		if (extent_item_pos < data_offset ||
+		    extent_item_pos >= data_offset + data_len)
+			return 1;
+		offset = extent_item_pos - data_offset;
+	}
 
 	e = kmalloc(sizeof(*e), GFP_NOFS);
 	if (!e)
@@ -53,7 +60,7 @@ static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
 
 	e->next = *eie;
 	e->inum = key->objectid;
-	e->offset = key->offset + (extent_item_pos - data_offset);
+	e->offset = key->offset + offset;
 	*eie = e;
 
 	return 0;
@@ -189,7 +196,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
 	struct extent_buffer *eb;
 	struct btrfs_key key;
 	struct btrfs_file_extent_item *fi;
-	struct extent_inode_elem *eie = NULL;
+	struct extent_inode_elem *eie = NULL, *old = NULL;
 	u64 disk_byte;
 
 	if (level != 0) {
@@ -223,6 +230,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
 
 		if (disk_byte == wanted_disk_byte) {
 			eie = NULL;
+			old = NULL;
 			if (extent_item_pos) {
 				ret = check_extent_in_eb(&key, eb, fi,
 						*extent_item_pos,
@@ -230,18 +238,20 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
 				if (ret < 0)
 					break;
 			}
-			if (!ret) {
-				ret = ulist_add(parents, eb->start,
-						(uintptr_t)eie, GFP_NOFS);
-				if (ret < 0)
-					break;
-				if (!extent_item_pos) {
-					ret = btrfs_next_old_leaf(root, path,
-							time_seq);
-					continue;
-				}
-			}
+			if (ret > 0)
+				goto next;
+			ret = ulist_add_merge(parents, eb->start,
+					      (uintptr_t)eie,
+					      (u64 *)&old, GFP_NOFS);
+			if (ret < 0)
+				break;
+			if (!ret && extent_item_pos) {
+				while (old->next)
+					old = old->next;
+				old->next = eie;
+			}
 		}
+next:
 		ret = btrfs_next_old_item(root, path, time_seq);
 	}
 
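The add_all_parents() change above switches from ulist_add() to ulist_add_merge() so that a second file-extent item pointing at the same extent gets its inode elements appended to the list already stored for that parent instead of being dropped. Purely as an illustration of that append step, here is a stand-alone sketch with a stub element type in place of struct extent_inode_elem (the names elem and append_elems are hypothetical, not kernel API):

struct elem {
	struct elem *next;
};

/*
 * Append new_list to the tail of old_list, mirroring the
 * "while (old->next) old = old->next; old->next = eie;" walk in the
 * hunk above. old_list is assumed non-NULL, just as 'old' is only
 * dereferenced after a successful ulist_add_merge().
 */
static void append_elems(struct elem *old_list, struct elem *new_list)
{
	struct elem *old = old_list;

	while (old->next)
		old = old->next;
	old->next = new_list;
}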
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1271,7 +1271,6 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
 		BUG_ON(!eb_rewin);
 	}
 
-	extent_buffer_get(eb_rewin);
 	btrfs_tree_read_unlock(eb);
 	free_extent_buffer(eb);
 
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4048,7 +4048,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	}
 
 	while (!end) {
-		u64 offset_in_extent;
+		u64 offset_in_extent = 0;
 
 		/* break if the extent we found is outside the range */
 		if (em->start >= max || extent_map_end(em) < off)
@@ -4064,9 +4064,12 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 
 		/*
 		 * record the offset from the start of the extent
-		 * for adjusting the disk offset below
+		 * for adjusting the disk offset below. Only do this if the
+		 * extent isn't compressed since our in ram offset may be past
+		 * what we have actually allocated on disk.
 		 */
-		offset_in_extent = em_start - em->start;
+		if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
+			offset_in_extent = em_start - em->start;
 		em_end = extent_map_end(em);
 		em_len = em_end - em_start;
 		emflags = em->flags;
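The fiemap hunks above stop adding the in-extent offset to the physical address when an extent is compressed, since the bytes allocated on disk can be fewer than the bytes the extent covers in RAM. A rough stand-alone sketch of that rule, using a simplified stand-in for struct extent_map (em_stub and fiemap_physical are illustrative names, not the kernel's):

#include <stdbool.h>
#include <stdint.h>

struct em_stub {		/* simplified stand-in for struct extent_map */
	uint64_t start;		/* logical start of the extent */
	uint64_t block_start;	/* physical start on disk */
	bool compressed;
};

/*
 * Mirror of the rule the patch applies: only bias the reported physical
 * address by the offset into the extent when the extent is not
 * compressed, so we never point past the bytes actually allocated.
 */
static uint64_t fiemap_physical(const struct em_stub *em, uint64_t em_start)
{
	uint64_t offset_in_extent = 0;

	if (!em->compressed)
		offset_in_extent = em_start - em->start;

	return em->block_start + offset_in_extent;
}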
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -596,20 +596,29 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
 		if (no_splits)
 			goto next;
 
-		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
-		    em->start < start) {
+		if (em->start < start) {
 			split->start = em->start;
 			split->len = start - em->start;
-			split->orig_start = em->orig_start;
-			split->block_start = em->block_start;
 
-			if (compressed)
-				split->block_len = em->block_len;
-			else
-				split->block_len = split->len;
-			split->ram_bytes = em->ram_bytes;
-			split->orig_block_len = max(split->block_len,
-						    em->orig_block_len);
+			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
+				split->orig_start = em->orig_start;
+				split->block_start = em->block_start;
+
+				if (compressed)
+					split->block_len = em->block_len;
+				else
+					split->block_len = split->len;
+				split->orig_block_len = max(split->block_len,
+						em->orig_block_len);
+				split->ram_bytes = em->ram_bytes;
+			} else {
+				split->orig_start = split->start;
+				split->block_len = 0;
+				split->block_start = em->block_start;
+				split->orig_block_len = 0;
+				split->ram_bytes = split->len;
+			}
+
 			split->generation = gen;
 			split->bdev = em->bdev;
 			split->flags = flags;
@@ -620,8 +629,7 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
 			split = split2;
 			split2 = NULL;
 		}
-		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
-		    testend && em->start + em->len > start + len) {
+		if (testend && em->start + em->len > start + len) {
 			u64 diff = start + len - em->start;
 
 			split->start = start + len;
@@ -630,18 +638,28 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
 			split->flags = flags;
 			split->compress_type = em->compress_type;
 			split->generation = gen;
-			split->orig_block_len = max(em->block_len,
-						    em->orig_block_len);
-			split->ram_bytes = em->ram_bytes;
 
-			if (compressed) {
-				split->block_len = em->block_len;
-				split->block_start = em->block_start;
-				split->orig_start = em->orig_start;
+			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
+				split->orig_block_len = max(em->block_len,
+						    em->orig_block_len);
+
+				split->ram_bytes = em->ram_bytes;
+				if (compressed) {
+					split->block_len = em->block_len;
+					split->block_start = em->block_start;
+					split->orig_start = em->orig_start;
+				} else {
+					split->block_len = split->len;
+					split->block_start = em->block_start
+						+ diff;
+					split->orig_start = em->orig_start;
+				}
 			} else {
-				split->block_len = split->len;
-				split->block_start = em->block_start + diff;
-				split->orig_start = em->orig_start;
+				split->ram_bytes = split->len;
+				split->orig_start = split->start;
+				split->block_len = 0;
+				split->block_start = em->block_start;
+				split->orig_block_len = 0;
 			}
 
 			ret = add_extent_mapping(em_tree, split, modified);
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2166,16 +2166,23 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
 		if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
 			continue;
 
-		extent_offset = btrfs_file_extent_offset(leaf, extent);
-		if (key.offset - extent_offset != offset)
+		/*
+		 * 'offset' refers to the exact key.offset,
+		 * NOT the 'offset' field in btrfs_extent_data_ref, ie.
+		 * (key.offset - extent_offset).
+		 */
+		if (key.offset != offset)
 			continue;
 
+		extent_offset = btrfs_file_extent_offset(leaf, extent);
 		num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
+
 		if (extent_offset >= old->extent_offset + old->offset +
 		    old->len || extent_offset + num_bytes <=
 		    old->extent_offset + old->offset)
 			continue;
 
+		ret = 0;
 		break;
 	}
 
@@ -2187,7 +2194,7 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
 
 	backref->root_id = root_id;
 	backref->inum = inum;
-	backref->file_pos = offset + extent_offset;
+	backref->file_pos = offset;
 	backref->num_bytes = num_bytes;
 	backref->extent_offset = extent_offset;
 	backref->generation = btrfs_file_extent_generation(leaf, extent);
@@ -2210,7 +2217,8 @@ static noinline bool record_extent_backrefs(struct btrfs_path *path,
 	new->path = path;
 
 	list_for_each_entry_safe(old, tmp, &new->head, list) {
-		ret = iterate_inodes_from_logical(old->bytenr, fs_info,
+		ret = iterate_inodes_from_logical(old->bytenr +
+						  old->extent_offset, fs_info,
 						  path, record_one_backref,
 						  old);
 		BUG_ON(ret < 0 && ret != -ENOENT);
@@ -4391,9 +4399,6 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 	int mask = attr->ia_valid;
 	int ret;
 
-	if (newsize == oldsize)
-		return 0;
-
 	/*
 	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
 	 * special case where we need to update the times despite not having
@@ -5165,14 +5170,31 @@ next:
 	}
 
 	/* Reached end of directory/root. Bump pos past the last item. */
-	if (key_type == BTRFS_DIR_INDEX_KEY)
-		/*
-		 * 32-bit glibc will use getdents64, but then strtol -
-		 * so the last number we can serve is this.
-		 */
-		ctx->pos = 0x7fffffff;
-	else
-		ctx->pos++;
+	ctx->pos++;
+
+	/*
+	 * Stop new entries from being returned after we return the last
+	 * entry.
+	 *
+	 * New directory entries are assigned a strictly increasing
+	 * offset. This means that new entries created during readdir
+	 * are *guaranteed* to be seen in the future by that readdir.
+	 * This has broken buggy programs which operate on names as
+	 * they're returned by readdir. Until we re-use freed offsets
+	 * we have this hack to stop new entries from being returned
+	 * under the assumption that they'll never reach this huge
+	 * offset.
+	 *
+	 * This is being careful not to overflow 32bit loff_t unless the
+	 * last entry requires it because doing so has broken 32bit apps
+	 * in the past.
+	 */
+	if (key_type == BTRFS_DIR_INDEX_KEY) {
+		if (ctx->pos >= INT_MAX)
+			ctx->pos = LLONG_MAX;
+		else
+			ctx->pos = INT_MAX;
+	}
 nopos:
 	ret = 0;
 err:
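The last inode.c hunk is the readdir change called out in the pull message (f_pos, i.e. ctx->pos, wrapping for 32-bit callers). Below is a minimal sketch of the capping rule it introduces, assuming a simplified dir_ctx in place of the kernel's struct dir_context; cap_readdir_pos is a hypothetical helper, not a kernel function:

#include <limits.h>

struct dir_ctx {
	long long pos;		/* plays the role of loff_t ctx->pos */
};

/* Called once the last real directory entry has been emitted. */
static void cap_readdir_pos(struct dir_ctx *ctx, int dir_index_keys)
{
	/* Bump past the last item we returned. */
	ctx->pos++;

	if (!dir_index_keys)
		return;

	/*
	 * New entries get strictly increasing offsets, so park the cursor
	 * at a value no future entry should reach, but stay within 32
	 * bits unless the last entry already needed more, since jumping
	 * straight to a huge offset has broken 32-bit userspace before.
	 */
	if (ctx->pos >= INT_MAX)
		ctx->pos = LLONG_MAX;
	else
		ctx->pos = INT_MAX;
}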
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -983,12 +983,12 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
  * a dirty root struct and adds it into the list of dead roots that need to
  * be deleted
  */
-int btrfs_add_dead_root(struct btrfs_root *root)
+void btrfs_add_dead_root(struct btrfs_root *root)
 {
 	spin_lock(&root->fs_info->trans_lock);
-	list_add_tail(&root->root_list, &root->fs_info->dead_roots);
+	if (list_empty(&root->root_list))
+		list_add_tail(&root->root_list, &root->fs_info->dead_roots);
 	spin_unlock(&root->fs_info->trans_lock);
-	return 0;
 }
 
 /*
@@ -1925,7 +1925,7 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
 	}
 	root = list_first_entry(&fs_info->dead_roots,
 			struct btrfs_root, root_list);
-	list_del(&root->root_list);
+	list_del_init(&root->root_list);
 	spin_unlock(&fs_info->trans_lock);
 
 	pr_debug("btrfs: cleaner removing %llu\n",
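The transaction.c hunks above make dead-root bookkeeping idempotent: btrfs_add_dead_root() only links the root when it is not already on a list, and the cleaner switches to list_del_init() so a removed root tests as empty again and can safely be re-added. A minimal userspace sketch of that pairing, with a hand-rolled circular list standing in for the kernel's <linux/list.h> helpers:

#include <stdbool.h>

struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *n)
{
	n->prev = n->next = n;
}

/* A self-linked node is "not on any list", like after INIT_LIST_HEAD(). */
static bool list_is_empty(const struct list_node *n)
{
	return n->next == n;
}

static void list_add_tail_node(struct list_node *n, struct list_node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* Unlink and re-initialise, mirroring list_del_init(): the node can be
 * tested with list_is_empty() and safely re-added later. */
static void list_del_init_node(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

/* Idempotent insertion, the shape of the patched btrfs_add_dead_root(). */
static void add_dead_root(struct list_node *root_list,
			  struct list_node *dead_roots)
{
	if (list_is_empty(root_list))
		list_add_tail_node(root_list, dead_roots);
}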
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -143,7 +143,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
 				     struct btrfs_root *root);
 
-int btrfs_add_dead_root(struct btrfs_root *root);
+void btrfs_add_dead_root(struct btrfs_root *root);
 int btrfs_defrag_root(struct btrfs_root *root);
 int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3746,8 +3746,9 @@ next_slot:
 	}
 
 log_extents:
+	btrfs_release_path(path);
+	btrfs_release_path(dst_path);
 	if (fast_search) {
-		btrfs_release_path(dst_path);
 		ret = btrfs_log_changed_extents(trans, root, inode, dst_path);
 		if (ret) {
 			err = ret;
@@ -3764,8 +3765,6 @@ log_extents:
 	}
 
 	if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
-		btrfs_release_path(path);
-		btrfs_release_path(dst_path);
 		ret = log_directory_changes(trans, root, inode, path, dst_path);
 		if (ret) {
 			err = ret;