Merge branch 'for-linus-4.5' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs updates from Chris Mason:
 "This has our usual assortment of fixes and cleanups, but the biggest
  change included is Omar Sandoval's free space tree. It's not the
  default yet, mounting -o space_cache=v2 enables it and sets a readonly
  compat bit. The tree can actually be deleted and regenerated if there
  are any problems, but it has held up really well in testing so far.

  For very large filesystems (30T+) our existing free space caching code
  can end up taking a huge amount of time during commits. The new tree
  based code is faster and less work overall to update as the commit
  progresses.

  Omar worked on this during the summer and we'll hammer on it in
  production here at FB over the next few months"

* 'for-linus-4.5' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (73 commits)
  Btrfs: fix fitrim discarding device area reserved for boot loader's use
  Btrfs: Check metadata redundancy on balance
  btrfs: statfs: report zero available if metadata are exhausted
  btrfs: preallocate path for snapshot creation at ioctl time
  btrfs: allocate root item at snapshot ioctl time
  btrfs: do an allocation earlier during snapshot creation
  btrfs: use smaller type for btrfs_path locks
  btrfs: use smaller type for btrfs_path lowest_level
  btrfs: use smaller type for btrfs_path reada
  btrfs: cleanup, use enum values for btrfs_path reada
  btrfs: constify static arrays
  btrfs: constify remaining structs with function pointers
  btrfs tests: replace whole ops structure for free space tests
  btrfs: use list_for_each_entry* in backref.c
  btrfs: use list_for_each_entry_safe in free-space-cache.c
  btrfs: use list_for_each_entry* in check-integrity.c
  Btrfs: use linux/sizes.h to represent constants
  btrfs: cleanup, remove stray return statements
  btrfs: zero out delayed node upon allocation
  btrfs: pass proper enum type to start_transaction()
  ...
commit c1a198d923
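The headline feature comes down to two mount-time decisions, spelled out in the disk-io.c hunks further down. The sketch below is a condensation of that open_ctree() logic, not code from the series itself: maybe_setup_free_space_tree() is an illustrative name, and error logging plus close_ctree() teardown are elided.

```c
/*
 * Condensed sketch of the open_ctree() gating added by this series
 * (see the fs/btrfs/disk-io.c hunks below). Hypothetical helper name;
 * error reporting and teardown elided.
 */
static int maybe_setup_free_space_tree(struct btrfs_fs_info *fs_info,
				       struct btrfs_root *tree_root)
{
	int ret;

	/* "-o space_cache=v2" requested, but the readonly compat bit is
	 * not set yet: build the tree and persist the bit. */
	if (btrfs_test_opt(tree_root, FREE_SPACE_TREE) &&
	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
		ret = btrfs_create_free_space_tree(fs_info);
		if (ret)
			return ret;
	}

	/* "-o clear_cache" with the bit set: delete the tree; it can be
	 * regenerated on a later space_cache=v2 mount. */
	if (btrfs_test_opt(tree_root, CLEAR_CACHE) &&
	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
		ret = btrfs_clear_free_space_tree(fs_info);
		if (ret)
			return ret;
	}
	return 0;
}
```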
fs/btrfs/Makefile
@@ -9,11 +9,12 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
 	   export.o tree-log.o free-space-cache.o zlib.o lzo.o \
 	   compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
 	   reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
-	   uuid-tree.o props.o hash.o
+	   uuid-tree.o props.o hash.o free-space-tree.o
 
 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
 btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
 
 btrfs-$(CONFIG_BTRFS_FS_RUN_SANITY_TESTS) += tests/free-space-tests.o \
 	tests/extent-buffer-tests.o tests/btrfs-tests.o \
-	tests/extent-io-tests.o tests/inode-tests.o tests/qgroup-tests.o
+	tests/extent-io-tests.o tests/inode-tests.o tests/qgroup-tests.o \
+	tests/free-space-tree-tests.o
fs/btrfs/acl.c
@@ -48,7 +48,7 @@ struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
 
 	size = __btrfs_getxattr(inode, name, "", 0);
 	if (size > 0) {
-		value = kzalloc(size, GFP_NOFS);
+		value = kzalloc(size, GFP_KERNEL);
 		if (!value)
 			return ERR_PTR(-ENOMEM);
 		size = __btrfs_getxattr(inode, name, value, size);
@@ -102,7 +102,7 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans,
 
 	if (acl) {
 		size = posix_acl_xattr_size(acl->a_count);
-		value = kmalloc(size, GFP_NOFS);
+		value = kmalloc(size, GFP_KERNEL);
 		if (!value) {
 			ret = -ENOMEM;
 			goto out;
fs/btrfs/async-thread.c
@@ -97,7 +97,7 @@ static struct __btrfs_workqueue *
 __btrfs_alloc_workqueue(const char *name, unsigned int flags, int limit_active,
			 int thresh)
 {
-	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
+	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);
 
 	if (!ret)
 		return NULL;
@@ -148,7 +148,7 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
					      int limit_active,
					      int thresh)
 {
-	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
+	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);
 
 	if (!ret)
 		return NULL;
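A note on the GFP_NOFS → GFP_KERNEL hunks above: GFP_NOFS exists to keep direct reclaim from re-entering the filesystem (for example while a transaction is running). The reading offered here — the editor's, not quoted from the changelogs — is that ACL xattr reads and workqueue setup never run in such a context, so the less restrictive GFP_KERNEL is safe and gives the allocator more freedom to reclaim.

```c
/* The pattern of these hunks: outside transaction/reclaim-sensitive
 * context, prefer GFP_KERNEL over GFP_NOFS (kernel-style fragment). */
value = kzalloc(size, GFP_KERNEL);	/* was: kzalloc(size, GFP_NOFS) */
if (!value)
	return ERR_PTR(-ENOMEM);
```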
fs/btrfs/backref.c
@@ -520,13 +520,10 @@ static inline int ref_for_same_block(struct __prelim_ref *ref1,
 static int __add_missing_keys(struct btrfs_fs_info *fs_info,
			      struct list_head *head)
 {
-	struct list_head *pos;
+	struct __prelim_ref *ref;
 	struct extent_buffer *eb;
 
-	list_for_each(pos, head) {
-		struct __prelim_ref *ref;
-		ref = list_entry(pos, struct __prelim_ref, list);
-
+	list_for_each_entry(ref, head, list) {
 		if (ref->parent)
 			continue;
 		if (ref->key_for_search.type)
@@ -563,23 +560,15 @@ static int __add_missing_keys(struct btrfs_fs_info *fs_info,
  */
 static void __merge_refs(struct list_head *head, int mode)
 {
-	struct list_head *pos1;
+	struct __prelim_ref *ref1;
 
-	list_for_each(pos1, head) {
-		struct list_head *n2;
-		struct list_head *pos2;
-		struct __prelim_ref *ref1;
+	list_for_each_entry(ref1, head, list) {
+		struct __prelim_ref *ref2 = ref1, *tmp;
 
-		ref1 = list_entry(pos1, struct __prelim_ref, list);
-
-		for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
-		     pos2 = n2, n2 = pos2->next) {
-			struct __prelim_ref *ref2;
+		list_for_each_entry_safe_continue(ref2, tmp, head, list) {
 			struct __prelim_ref *xchg;
 			struct extent_inode_elem *eie;
 
-			ref2 = list_entry(pos2, struct __prelim_ref, list);
-
 			if (!ref_for_same_block(ref1, ref2))
 				continue;
 			if (mode == 1) {
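The backref.c changes above are part of the list_for_each_entry* conversions named in the merge summary. For readers unfamiliar with the macros, a minimal kernel-style sketch of the two forms (relies on <linux/list.h>; not buildable outside a kernel tree):

```c
struct list_head *pos;
struct __prelim_ref *ref;

/* Open-coded form: iterate raw list_head nodes, then recover the
 * containing object by hand with list_entry()/container_of(). */
list_for_each(pos, head) {
	ref = list_entry(pos, struct __prelim_ref, list);
	/* ... use ref ... */
}

/* Converted form: the macro performs the container_of() itself, so the
 * cursor variable is already the typed object. */
list_for_each_entry(ref, head, list) {
	/* ... use ref ... */
}
```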
fs/btrfs/btrfs_inode.h
@@ -192,6 +192,10 @@ struct btrfs_inode {
 	/* File creation time. */
 	struct timespec i_otime;
 
+	/* Hook into fs_info->delayed_iputs */
+	struct list_head delayed_iput;
+	long delayed_iput_count;
+
 	struct inode vfs_inode;
 };
 
fs/btrfs/check-integrity.c
@@ -531,13 +531,9 @@ static struct btrfsic_block *btrfsic_block_hashtable_lookup(
	    (((unsigned int)(dev_bytenr >> 16)) ^
	     ((unsigned int)((uintptr_t)bdev))) &
	    (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1);
-	struct list_head *elem;
+	struct btrfsic_block *b;
 
-	list_for_each(elem, h->table + hashval) {
-		struct btrfsic_block *const b =
-		    list_entry(elem, struct btrfsic_block,
-			       collision_resolving_node);
-
+	list_for_each_entry(b, h->table + hashval, collision_resolving_node) {
 		if (b->dev_state->bdev == bdev && b->dev_bytenr == dev_bytenr)
 			return b;
 	}
@@ -588,13 +584,9 @@ static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup(
	     ((unsigned int)((uintptr_t)bdev_ref_to)) ^
	     ((unsigned int)((uintptr_t)bdev_ref_from))) &
	    (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);
-	struct list_head *elem;
+	struct btrfsic_block_link *l;
 
-	list_for_each(elem, h->table + hashval) {
-		struct btrfsic_block_link *const l =
-		    list_entry(elem, struct btrfsic_block_link,
-			       collision_resolving_node);
-
+	list_for_each_entry(l, h->table + hashval, collision_resolving_node) {
 		BUG_ON(NULL == l->block_ref_to);
 		BUG_ON(NULL == l->block_ref_from);
 		if (l->block_ref_to->dev_state->bdev == bdev_ref_to &&
@@ -639,13 +631,9 @@ static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
 	const unsigned int hashval =
	    (((unsigned int)((uintptr_t)bdev)) &
	     (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1));
-	struct list_head *elem;
+	struct btrfsic_dev_state *ds;
 
-	list_for_each(elem, h->table + hashval) {
-		struct btrfsic_dev_state *const ds =
-		    list_entry(elem, struct btrfsic_dev_state,
-			       collision_resolving_node);
-
+	list_for_each_entry(ds, h->table + hashval, collision_resolving_node) {
 		if (ds->bdev == bdev)
 			return ds;
 	}
@@ -1720,29 +1708,20 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 
 static void btrfsic_dump_database(struct btrfsic_state *state)
 {
-	struct list_head *elem_all;
+	const struct btrfsic_block *b_all;
 
 	BUG_ON(NULL == state);
 
 	printk(KERN_INFO "all_blocks_list:\n");
-	list_for_each(elem_all, &state->all_blocks_list) {
-		const struct btrfsic_block *const b_all =
-		    list_entry(elem_all, struct btrfsic_block,
-			       all_blocks_node);
-		struct list_head *elem_ref_to;
-		struct list_head *elem_ref_from;
+	list_for_each_entry(b_all, &state->all_blocks_list, all_blocks_node) {
+		const struct btrfsic_block_link *l;
 
 		printk(KERN_INFO "%c-block @%llu (%s/%llu/%d)\n",
 		       btrfsic_get_block_type(state, b_all),
 		       b_all->logical_bytenr, b_all->dev_state->name,
 		       b_all->dev_bytenr, b_all->mirror_num);
 
-		list_for_each(elem_ref_to, &b_all->ref_to_list) {
-			const struct btrfsic_block_link *const l =
-			    list_entry(elem_ref_to,
-				       struct btrfsic_block_link,
-				       node_ref_to);
-
+		list_for_each_entry(l, &b_all->ref_to_list, node_ref_to) {
 			printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
 			       " refers %u* to"
 			       " %c @%llu (%s/%llu/%d)\n",
@@ -1757,12 +1736,7 @@ static void btrfsic_dump_database(struct btrfsic_state *state)
 			       l->block_ref_to->mirror_num);
 		}
 
-		list_for_each(elem_ref_from, &b_all->ref_from_list) {
-			const struct btrfsic_block_link *const l =
-			    list_entry(elem_ref_from,
-				       struct btrfsic_block_link,
-				       node_ref_from);
-
+		list_for_each_entry(l, &b_all->ref_from_list, node_ref_from) {
 			printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
 			       " is ref %u* from"
 			       " %c @%llu (%s/%llu/%d)\n",
@@ -1845,8 +1819,7 @@ again:
					      &state->block_hashtable);
 	if (NULL != block) {
 		u64 bytenr = 0;
-		struct list_head *elem_ref_to;
-		struct list_head *tmp_ref_to;
+		struct btrfsic_block_link *l, *tmp;
 
 		if (block->is_superblock) {
 			bytenr = btrfs_super_bytenr((struct btrfs_super_block *)
@@ -1967,13 +1940,8 @@ again:
			 * because it still carries valueable information
			 * like whether it was ever written and IO completed.
			 */
-			list_for_each_safe(elem_ref_to, tmp_ref_to,
-					   &block->ref_to_list) {
-				struct btrfsic_block_link *const l =
-				    list_entry(elem_ref_to,
-					       struct btrfsic_block_link,
-					       node_ref_to);
-
+			list_for_each_entry_safe(l, tmp, &block->ref_to_list,
+						 node_ref_to) {
 				if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
 					btrfsic_print_rem_link(state, l);
 				l->ref_cnt--;
@@ -2436,7 +2404,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
				     struct btrfsic_block *const block,
				     int recursion_level)
 {
-	struct list_head *elem_ref_to;
+	const struct btrfsic_block_link *l;
 	int ret = 0;
 
 	if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
@@ -2464,11 +2432,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
	 * This algorithm is recursive because the amount of used stack
	 * space is very small and the max recursion depth is limited.
	 */
-	list_for_each(elem_ref_to, &block->ref_to_list) {
-		const struct btrfsic_block_link *const l =
-		    list_entry(elem_ref_to, struct btrfsic_block_link,
-			       node_ref_to);
-
+	list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
 		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
 			printk(KERN_INFO
 			       "rl=%d, %c @%llu (%s/%llu/%d)"
@@ -2561,7 +2525,7 @@ static int btrfsic_is_block_ref_by_superblock(
		const struct btrfsic_block *block,
		int recursion_level)
 {
-	struct list_head *elem_ref_from;
+	const struct btrfsic_block_link *l;
 
 	if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
 		/* refer to comment at "abort cyclic linkage (case 1)" */
@@ -2576,11 +2540,7 @@ static int btrfsic_is_block_ref_by_superblock(
	 * This algorithm is recursive because the amount of used stack space
	 * is very small and the max recursion depth is limited.
	 */
-	list_for_each(elem_ref_from, &block->ref_from_list) {
-		const struct btrfsic_block_link *const l =
-		    list_entry(elem_ref_from, struct btrfsic_block_link,
-			       node_ref_from);
-
+	list_for_each_entry(l, &block->ref_from_list, node_ref_from) {
 		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
 			printk(KERN_INFO
 			       "rl=%d, %c @%llu (%s/%llu/%d)"
@@ -2669,7 +2629,7 @@ static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
				  const struct btrfsic_block *block,
				  int indent_level)
 {
-	struct list_head *elem_ref_to;
+	const struct btrfsic_block_link *l;
 	int indent_add;
 	static char buf[80];
 	int cursor_position;
@@ -2704,11 +2664,7 @@ static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
 	}
 
 	cursor_position = indent_level;
-	list_for_each(elem_ref_to, &block->ref_to_list) {
-		const struct btrfsic_block_link *const l =
-		    list_entry(elem_ref_to, struct btrfsic_block_link,
-			       node_ref_to);
-
+	list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
 		while (cursor_position < indent_level) {
 			printk(" ");
 			cursor_position++;
@@ -3165,8 +3121,7 @@ int btrfsic_mount(struct btrfs_root *root,
 void btrfsic_unmount(struct btrfs_root *root,
		     struct btrfs_fs_devices *fs_devices)
 {
-	struct list_head *elem_all;
-	struct list_head *tmp_all;
+	struct btrfsic_block *b_all, *tmp_all;
 	struct btrfsic_state *state;
 	struct list_head *dev_head = &fs_devices->devices;
 	struct btrfs_device *device;
@@ -3206,20 +3161,12 @@ void btrfsic_unmount(struct btrfs_root *root,
	 * just free all memory that was allocated dynamically.
	 * Free the blocks and the block_links.
	 */
-	list_for_each_safe(elem_all, tmp_all, &state->all_blocks_list) {
-		struct btrfsic_block *const b_all =
-		    list_entry(elem_all, struct btrfsic_block,
-			       all_blocks_node);
-		struct list_head *elem_ref_to;
-		struct list_head *tmp_ref_to;
-
-		list_for_each_safe(elem_ref_to, tmp_ref_to,
-				   &b_all->ref_to_list) {
-			struct btrfsic_block_link *const l =
-			    list_entry(elem_ref_to,
-				       struct btrfsic_block_link,
-				       node_ref_to);
-
+	list_for_each_entry_safe(b_all, tmp_all, &state->all_blocks_list,
+				 all_blocks_node) {
+		struct btrfsic_block_link *l, *tmp;
+
+		list_for_each_entry_safe(l, tmp, &b_all->ref_to_list,
+					 node_ref_to) {
 			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
 				btrfsic_print_rem_link(state, l);
 
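btrfsic_unmount() above frees each block while walking all_blocks_list, which is exactly the case the _safe iterators exist for: the plain macro would chase the just-freed node's next pointer. A minimal kernel-style sketch of the difference (<linux/list.h>; not standalone-buildable):

```c
struct btrfsic_block *b_all, *tmp_all;

/* list_for_each_entry_safe() keeps tmp_all one element ahead, so the
 * current entry may be unlinked and freed inside the loop body. */
list_for_each_entry_safe(b_all, tmp_all, &state->all_blocks_list,
			 all_blocks_node) {
	list_del(&b_all->all_blocks_node);
	kfree(b_all);	/* safe: the next entry was saved in tmp_all */
}
```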
fs/btrfs/ctree.c
@@ -1555,7 +1555,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
 		return 0;
 	}
 
-	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
+	search_start = buf->start & ~((u64)SZ_1G - 1);
 
 	if (parent)
 		btrfs_set_lock_blocking(parent);
@@ -2248,7 +2248,6 @@ static void reada_for_search(struct btrfs_root *root,
 	u64 target;
 	u64 nread = 0;
 	u64 gen;
-	int direction = path->reada;
 	struct extent_buffer *eb;
 	u32 nr;
 	u32 blocksize;
@@ -2276,16 +2275,16 @@ static void reada_for_search(struct btrfs_root *root,
 	nr = slot;
 
 	while (1) {
-		if (direction < 0) {
+		if (path->reada == READA_BACK) {
 			if (nr == 0)
 				break;
 			nr--;
-		} else if (direction > 0) {
+		} else if (path->reada == READA_FORWARD) {
 			nr++;
 			if (nr >= nritems)
 				break;
 		}
-		if (path->reada < 0 && objectid) {
+		if (path->reada == READA_BACK && objectid) {
 			btrfs_node_key(node, &disk_key, nr);
 			if (btrfs_disk_key_objectid(&disk_key) != objectid)
 				break;
@@ -2493,7 +2492,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
 		btrfs_set_path_blocking(p);
 
 		free_extent_buffer(tmp);
-		if (p->reada)
+		if (p->reada != READA_NONE)
 			reada_for_search(root, p, level, slot, key->objectid);
 
 		btrfs_release_path(p);
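The reada_for_search() changes above replace sign-encoded readahead directions (a negative path->reada meant backwards, positive meant forwards) with the named values introduced in ctree.h below. The idiom, as a before/after fragment:

```c
/* Before: the direction was hidden in the sign of an int. */
if (path->reada < 0) {			/* readahead backwards */
	/* ... */
} else if (path->reada > 0) {		/* readahead forwards */
	/* ... */
}

/* After: explicit enum values, stored in a u8 in struct btrfs_path. */
enum { READA_NONE = 0, READA_BACK, READA_FORWARD };

if (path->reada == READA_BACK) {
	/* ... */
} else if (path->reada == READA_FORWARD) {
	/* ... */
}
```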
fs/btrfs/ctree.h
@@ -35,6 +35,7 @@
 #include <linux/btrfs.h>
 #include <linux/workqueue.h>
 #include <linux/security.h>
+#include <linux/sizes.h>
 #include "extent_io.h"
 #include "extent_map.h"
 #include "async-thread.h"
@@ -96,6 +97,9 @@ struct btrfs_ordered_sum;
 /* for storing items that use the BTRFS_UUID_KEY* types */
 #define BTRFS_UUID_TREE_OBJECTID 9ULL
 
+/* tracks free space in block groups. */
+#define BTRFS_FREE_SPACE_TREE_OBJECTID 10ULL
+
 /* for storing balance parameters in the root tree */
 #define BTRFS_BALANCE_OBJECTID -4ULL
 
@@ -174,7 +178,7 @@ struct btrfs_ordered_sum;
 /* csum types */
 #define BTRFS_CSUM_TYPE_CRC32	0
 
-static int btrfs_csum_sizes[] = { 4 };
+static const int btrfs_csum_sizes[] = { 4 };
 
 /* four bytes for CRC32 */
 #define BTRFS_EMPTY_DIR_SIZE 0
@@ -196,9 +200,9 @@ static int btrfs_csum_sizes[] = { 4 };
 /* ioprio of readahead is set to idle */
 #define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0))
 
-#define BTRFS_DIRTY_METADATA_THRESH	(32 * 1024 * 1024)
+#define BTRFS_DIRTY_METADATA_THRESH	SZ_32M
 
-#define BTRFS_MAX_EXTENT_SIZE (128 * 1024 * 1024)
+#define BTRFS_MAX_EXTENT_SIZE	SZ_128M
 
 /*
  * The key defines the order in the tree, and so it also defines (optimal)
@@ -500,6 +504,8 @@ struct btrfs_super_block {
  * Compat flags that we support. If any incompat flags are set other than the
  * ones specified below then we will fail to mount
  */
+#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE	(1ULL << 0)
+
 #define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF	(1ULL << 0)
 #define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL	(1ULL << 1)
 #define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS	(1ULL << 2)
@@ -526,7 +532,10 @@ struct btrfs_super_block {
 #define BTRFS_FEATURE_COMPAT_SUPP		0ULL
 #define BTRFS_FEATURE_COMPAT_SAFE_SET		0ULL
 #define BTRFS_FEATURE_COMPAT_SAFE_CLEAR		0ULL
-#define BTRFS_FEATURE_COMPAT_RO_SUPP		0ULL
+
+#define BTRFS_FEATURE_COMPAT_RO_SUPP			\
+	(BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE)
+
 #define BTRFS_FEATURE_COMPAT_RO_SAFE_SET	0ULL
 #define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR	0ULL
 
@@ -590,14 +599,15 @@ struct btrfs_node {
  * The slots array records the index of the item or block pointer
  * used while walking the tree.
  */
+enum { READA_NONE = 0, READA_BACK, READA_FORWARD };
 struct btrfs_path {
 	struct extent_buffer *nodes[BTRFS_MAX_LEVEL];
 	int slots[BTRFS_MAX_LEVEL];
 	/* if there is real range locking, this locks field will change */
-	int locks[BTRFS_MAX_LEVEL];
-	int reada;
+	u8 locks[BTRFS_MAX_LEVEL];
+	u8 reada;
 	/* keep some upper locks as we walk down */
-	int lowest_level;
+	u8 lowest_level;
 
 	/*
	 * set by btrfs_split_item, tells search_slot to keep all locks
@@ -1088,6 +1098,13 @@ struct btrfs_block_group_item {
 	__le64 flags;
 } __attribute__ ((__packed__));
 
+struct btrfs_free_space_info {
+	__le32 extent_count;
+	__le32 flags;
+} __attribute__ ((__packed__));
+
+#define BTRFS_FREE_SPACE_USING_BITMAPS (1ULL << 0)
+
 #define BTRFS_QGROUP_LEVEL_SHIFT	48
 static inline u64 btrfs_qgroup_level(u64 qgroupid)
 {
@@ -1296,6 +1313,9 @@ struct btrfs_caching_control {
 	atomic_t count;
 };
 
+/* Once caching_thread() finds this much free space, it will wake up waiters. */
+#define CACHING_CTL_WAKE_UP (1024 * 1024 * 2)
+
 struct btrfs_io_ctl {
 	void *cur, *orig;
 	struct page *page;
@@ -1321,8 +1341,20 @@ struct btrfs_block_group_cache {
 	u64 delalloc_bytes;
 	u64 bytes_super;
 	u64 flags;
-	u64 sectorsize;
 	u64 cache_generation;
+	u32 sectorsize;
+
+	/*
+	 * If the free space extent count exceeds this number, convert the block
+	 * group to bitmaps.
+	 */
+	u32 bitmap_high_thresh;
+
+	/*
+	 * If the free space extent count drops below this number, convert the
+	 * block group back to extents.
+	 */
+	u32 bitmap_low_thresh;
 
 	/*
	 * It is just used for the delayed data space allocation because
@@ -1378,6 +1410,15 @@ struct btrfs_block_group_cache {
 	struct list_head io_list;
 
 	struct btrfs_io_ctl io_ctl;
+
+	/* Lock for free space tree operations. */
+	struct mutex free_space_lock;
+
+	/*
+	 * Does the block group need to be added to the free space tree?
+	 * Protected by free_space_lock.
+	 */
+	int needs_free_space;
 };
 
 /* delayed seq elem */
@@ -1429,6 +1470,7 @@ struct btrfs_fs_info {
 	struct btrfs_root *csum_root;
 	struct btrfs_root *quota_root;
 	struct btrfs_root *uuid_root;
+	struct btrfs_root *free_space_root;
 
 	/* the log root tree is a directory of all the other log roots */
 	struct btrfs_root *log_root_tree;
@@ -1816,6 +1858,8 @@ struct btrfs_fs_info {
	 * and will be latter freed. Protected by fs_info->chunk_mutex.
	 */
 	struct list_head pinned_chunks;
+
+	int creating_free_space_tree;
 };
 
 struct btrfs_subvolume_writers {
@@ -2092,6 +2136,27 @@ struct btrfs_ioctl_defrag_range_args {
  */
 #define BTRFS_BLOCK_GROUP_ITEM_KEY 192
 
+/*
+ * Every block group is represented in the free space tree by a free space info
+ * item, which stores some accounting information. It is keyed on
+ * (block_group_start, FREE_SPACE_INFO, block_group_length).
+ */
+#define BTRFS_FREE_SPACE_INFO_KEY 198
+
+/*
+ * A free space extent tracks an extent of space that is free in a block group.
+ * It is keyed on (start, FREE_SPACE_EXTENT, length).
+ */
+#define BTRFS_FREE_SPACE_EXTENT_KEY 199
+
+/*
+ * When a block group becomes very fragmented, we convert it to use bitmaps
+ * instead of extents. A free space bitmap is keyed on
+ * (start, FREE_SPACE_BITMAP, length); the corresponding item is a bitmap with
+ * (length / sectorsize) bits.
+ */
+#define BTRFS_FREE_SPACE_BITMAP_KEY 200
+
 #define BTRFS_DEV_EXTENT_KEY	204
 #define BTRFS_DEV_ITEM_KEY	216
 #define BTRFS_CHUNK_ITEM_KEY	228
@@ -2184,6 +2249,7 @@ struct btrfs_ioctl_defrag_range_args {
 #define BTRFS_MOUNT_RESCAN_UUID_TREE	(1 << 23)
 #define BTRFS_MOUNT_FRAGMENT_DATA	(1 << 24)
 #define BTRFS_MOUNT_FRAGMENT_METADATA	(1 << 25)
+#define BTRFS_MOUNT_FREE_SPACE_TREE	(1 << 26)
 
 #define BTRFS_DEFAULT_COMMIT_INTERVAL	(30)
 #define BTRFS_DEFAULT_MAX_INLINE	(8192)
@@ -2506,6 +2572,11 @@ BTRFS_SETGET_FUNCS(disk_block_group_flags,
 BTRFS_SETGET_STACK_FUNCS(block_group_flags,
			struct btrfs_block_group_item, flags, 64);
 
+/* struct btrfs_free_space_info */
+BTRFS_SETGET_FUNCS(free_space_extent_count, struct btrfs_free_space_info,
+		   extent_count, 32);
+BTRFS_SETGET_FUNCS(free_space_flags, struct btrfs_free_space_info, flags, 32);
+
 /* struct btrfs_inode_ref */
 BTRFS_SETGET_FUNCS(inode_ref_name_len, struct btrfs_inode_ref, name_len, 16);
 BTRFS_SETGET_FUNCS(inode_ref_index, struct btrfs_inode_ref, index, 64);
@@ -3573,6 +3644,9 @@ void btrfs_end_write_no_snapshoting(struct btrfs_root *root);
 void check_system_chunk(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			const u64 type);
+u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
+		       struct btrfs_fs_info *info, u64 start, u64 end);
+
 /* ctree.c */
 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot);
@@ -3737,6 +3811,7 @@ static inline void free_fs_info(struct btrfs_fs_info *fs_info)
 	kfree(fs_info->csum_root);
 	kfree(fs_info->quota_root);
 	kfree(fs_info->uuid_root);
+	kfree(fs_info->free_space_root);
 	kfree(fs_info->super_copy);
 	kfree(fs_info->super_for_commit);
 	security_free_mnt_opts(&fs_info->security_opts);
@@ -3906,7 +3981,6 @@ void btrfs_extent_item_to_extent_map(struct inode *inode,
 /* inode.c */
 struct btrfs_delalloc_work {
 	struct inode *inode;
-	int wait;
 	int delay_iput;
 	struct completion completion;
 	struct list_head list;
@@ -3914,7 +3988,7 @@ struct btrfs_delalloc_work {
 };
 
 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
-						      int wait, int delay_iput);
+						      int delay_iput);
 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work);
 
 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
@@ -4253,16 +4327,98 @@ static inline void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info,
 	}
 }
 
+#define btrfs_clear_fs_incompat(__fs_info, opt) \
+	__btrfs_clear_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt)
+
+static inline void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info,
+					     u64 flag)
+{
+	struct btrfs_super_block *disk_super;
+	u64 features;
+
+	disk_super = fs_info->super_copy;
+	features = btrfs_super_incompat_flags(disk_super);
+	if (features & flag) {
+		spin_lock(&fs_info->super_lock);
+		features = btrfs_super_incompat_flags(disk_super);
+		if (features & flag) {
+			features &= ~flag;
+			btrfs_set_super_incompat_flags(disk_super, features);
+			btrfs_info(fs_info, "clearing %llu feature flag",
+				   flag);
+		}
+		spin_unlock(&fs_info->super_lock);
+	}
+}
+
 #define btrfs_fs_incompat(fs_info, opt) \
	__btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)
 
-static inline int __btrfs_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag)
+static inline bool __btrfs_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag)
 {
 	struct btrfs_super_block *disk_super;
 	disk_super = fs_info->super_copy;
 	return !!(btrfs_super_incompat_flags(disk_super) & flag);
 }
 
+#define btrfs_set_fs_compat_ro(__fs_info, opt) \
+	__btrfs_set_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)
+
+static inline void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info,
+					    u64 flag)
+{
+	struct btrfs_super_block *disk_super;
+	u64 features;
+
+	disk_super = fs_info->super_copy;
+	features = btrfs_super_compat_ro_flags(disk_super);
+	if (!(features & flag)) {
+		spin_lock(&fs_info->super_lock);
+		features = btrfs_super_compat_ro_flags(disk_super);
+		if (!(features & flag)) {
+			features |= flag;
+			btrfs_set_super_compat_ro_flags(disk_super, features);
+			btrfs_info(fs_info, "setting %llu ro feature flag",
+				   flag);
+		}
+		spin_unlock(&fs_info->super_lock);
+	}
+}
+
+#define btrfs_clear_fs_compat_ro(__fs_info, opt) \
+	__btrfs_clear_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)
+
+static inline void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info,
+					      u64 flag)
+{
+	struct btrfs_super_block *disk_super;
+	u64 features;
+
+	disk_super = fs_info->super_copy;
+	features = btrfs_super_compat_ro_flags(disk_super);
+	if (features & flag) {
+		spin_lock(&fs_info->super_lock);
+		features = btrfs_super_compat_ro_flags(disk_super);
+		if (features & flag) {
+			features &= ~flag;
+			btrfs_set_super_compat_ro_flags(disk_super, features);
+			btrfs_info(fs_info, "clearing %llu ro feature flag",
+				   flag);
+		}
+		spin_unlock(&fs_info->super_lock);
+	}
}
+
+#define btrfs_fs_compat_ro(fs_info, opt) \
+	__btrfs_fs_compat_ro((fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)
+
+static inline int __btrfs_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag)
+{
+	struct btrfs_super_block *disk_super;
+	disk_super = fs_info->super_copy;
+	return !!(btrfs_super_compat_ro_flags(disk_super) & flag);
+}
+
 /*
  * Call btrfs_abort_transaction as early as possible when an error condition is
  * detected, that way the exact line number is reported.
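The key comments added above fully determine the size of a free space bitmap item. A small standalone program to check the arithmetic for an illustrative block group (the 1 GiB / 4 KiB figures are example inputs, not values from the patch):

```c
/* Worked example of the bitmap sizing described in the new key comments. */
#include <stdio.h>

int main(void)
{
	unsigned long long length = 1ULL << 30;	/* 1 GiB block group */
	unsigned long sectorsize = 4096;	/* 4 KiB sectors */
	unsigned long long bits = length / sectorsize;

	/* (length / sectorsize) bits, per the FREE_SPACE_BITMAP comment */
	printf("bitmap bits:  %llu\n", bits);		/* 262144 */
	printf("bitmap bytes: %llu\n", bits / 8);	/* 32768 = 32 KiB */
	return 0;
}
```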
fs/btrfs/delayed-inode.c
@@ -54,16 +54,11 @@ static inline void btrfs_init_delayed_node(
 	delayed_node->root = root;
 	delayed_node->inode_id = inode_id;
 	atomic_set(&delayed_node->refs, 0);
-	delayed_node->count = 0;
-	delayed_node->flags = 0;
 	delayed_node->ins_root = RB_ROOT;
 	delayed_node->del_root = RB_ROOT;
 	mutex_init(&delayed_node->mutex);
-	delayed_node->index_cnt = 0;
 	INIT_LIST_HEAD(&delayed_node->n_list);
 	INIT_LIST_HEAD(&delayed_node->p_list);
-	delayed_node->bytes_reserved = 0;
-	memset(&delayed_node->inode_item, 0, sizeof(delayed_node->inode_item));
 }
 
 static inline int btrfs_is_continuous_delayed_item(
@@ -132,7 +127,7 @@ again:
 	if (node)
 		return node;
 
-	node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
+	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
 	if (!node)
 		return ERR_PTR(-ENOMEM);
 	btrfs_init_delayed_node(node, root, ino);
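The two delayed-inode.c hunks belong together: switching to kmem_cache_zalloc() is what allows the field-by-field zeroing in btrfs_init_delayed_node() to go away. The point is that a zeroed allocation can't silently miss a newly added field; in sketch form:

```c
/* Before: allocate, then remember to zero every zero-initialized field.
 * Adding a member to the struct and forgetting a line here leaves it
 * holding stale slab contents. */
node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
/* ... */
delayed_node->count = 0;
delayed_node->bytes_reserved = 0;

/* After: the object starts fully zeroed; only non-zero state needs
 * explicit initialization. */
node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
/* ... */
mutex_init(&delayed_node->mutex);
INIT_LIST_HEAD(&delayed_node->n_list);
```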
fs/btrfs/delayed-ref.c
@@ -493,12 +493,12 @@ update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			memcpy(&existing_ref->extent_op->key,
			       &ref->extent_op->key,
			       sizeof(ref->extent_op->key));
-			existing_ref->extent_op->update_key = 1;
+			existing_ref->extent_op->update_key = true;
 		}
 		if (ref->extent_op->update_flags) {
 			existing_ref->extent_op->flags_to_set |=
				ref->extent_op->flags_to_set;
-			existing_ref->extent_op->update_flags = 1;
+			existing_ref->extent_op->update_flags = true;
 		}
 		btrfs_free_delayed_extent_op(ref->extent_op);
 	}
fs/btrfs/delayed-ref.h
@@ -75,11 +75,11 @@ struct btrfs_delayed_ref_node {
 
 struct btrfs_delayed_extent_op {
 	struct btrfs_disk_key key;
+	u8 level;
+	bool update_key;
+	bool update_flags;
+	bool is_data;
 	u64 flags_to_set;
-	int level;
-	unsigned int update_key:1;
-	unsigned int update_flags:1;
-	unsigned int is_data:1;
 };
 
 /*
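Reordering btrfs_delayed_extent_op puts the one-byte members right after the packed 17-byte key, where they fill what used to be padding. The standalone program below compares stand-in versions of the two layouts; the disk_key stand-in mirrors btrfs_disk_key (objectid, type, offset; packed), and the exact savings are ABI-dependent.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the packed 17-byte struct btrfs_disk_key. */
struct disk_key {
	uint64_t objectid;
	uint8_t type;
	uint64_t offset;
} __attribute__((packed));

struct old_op {				/* layout before the patch */
	struct disk_key key;
	uint64_t flags_to_set;
	int level;
	unsigned int update_key:1;
	unsigned int update_flags:1;
	unsigned int is_data:1;
};

struct new_op {				/* layout after the patch */
	struct disk_key key;
	uint8_t level;
	bool update_key;
	bool update_flags;
	bool is_data;
	uint64_t flags_to_set;
};

int main(void)
{
	/* On x86-64 this typically prints 40 vs 32 bytes. */
	printf("old: %zu bytes\n", sizeof(struct old_op));
	printf("new: %zu bytes\n", sizeof(struct new_op));
	return 0;
}
```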
fs/btrfs/disk-io.c
@@ -42,6 +42,7 @@
 #include "locking.h"
 #include "tree-log.h"
 #include "free-space-cache.h"
+#include "free-space-tree.h"
 #include "inode-map.h"
 #include "check-integrity.h"
 #include "rcu-string.h"
@@ -362,7 +363,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 	}
 
 	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
-			 0, &cached_state);
+			 &cached_state);
 	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
 		ret = 0;
@@ -1650,6 +1651,9 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
 	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
 		return fs_info->uuid_root ? fs_info->uuid_root :
					    ERR_PTR(-ENOENT);
+	if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
+		return fs_info->free_space_root ? fs_info->free_space_root :
+						  ERR_PTR(-ENOENT);
 again:
 	root = btrfs_lookup_fs_root(fs_info, location->objectid);
 	if (root) {
@@ -2148,6 +2152,7 @@ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
 	free_root_extent_buffers(info->uuid_root);
 	if (chunk_root)
 		free_root_extent_buffers(info->chunk_root);
+	free_root_extent_buffers(info->free_space_root);
 }
 
 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
@@ -2448,6 +2453,15 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info,
 		fs_info->uuid_root = root;
 	}
 
+	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
+		location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
+		root = btrfs_read_tree_root(tree_root, &location);
+		if (IS_ERR(root))
+			return PTR_ERR(root);
+		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+		fs_info->free_space_root = root;
+	}
+
 	return 0;
 }
 
@@ -2668,6 +2682,7 @@ int open_ctree(struct super_block *sb,
 	if (btrfs_check_super_csum(bh->b_data)) {
 		printk(KERN_ERR "BTRFS: superblock checksum mismatch\n");
 		err = -EINVAL;
+		brelse(bh);
 		goto fail_alloc;
 	}
 
@@ -2809,7 +2824,7 @@ int open_ctree(struct super_block *sb,
 
 	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
 	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
-				    4 * 1024 * 1024 / PAGE_CACHE_SIZE);
+				    SZ_4M / PAGE_CACHE_SIZE);
 
 	tree_root->nodesize = nodesize;
 	tree_root->sectorsize = sectorsize;
@@ -3051,6 +3066,18 @@ retry_root_backup:
 	if (sb->s_flags & MS_RDONLY)
 		return 0;
 
+	if (btrfs_test_opt(tree_root, FREE_SPACE_TREE) &&
+	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
+		pr_info("BTRFS: creating free space tree\n");
+		ret = btrfs_create_free_space_tree(fs_info);
+		if (ret) {
+			pr_warn("BTRFS: failed to create free space tree %d\n",
+				ret);
+			close_ctree(tree_root);
+			return ret;
+		}
+	}
+
 	down_read(&fs_info->cleanup_work_sem);
 	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
@@ -3076,6 +3103,18 @@ retry_root_backup:
 
 	btrfs_qgroup_rescan_resume(fs_info);
 
+	if (btrfs_test_opt(tree_root, CLEAR_CACHE) &&
+	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
+		pr_info("BTRFS: clearing free space tree\n");
+		ret = btrfs_clear_free_space_tree(fs_info);
+		if (ret) {
+			pr_warn("BTRFS: failed to clear free space tree %d\n",
+				ret);
+			close_ctree(tree_root);
+			return ret;
+		}
+	}
+
 	if (!fs_info->uuid_root) {
 		pr_info("BTRFS: creating UUID tree\n");
 		ret = btrfs_create_uuid_tree(fs_info);
@@ -3902,11 +3941,6 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
 	return !ret;
 }
 
-int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
-{
-	return set_extent_buffer_uptodate(buf);
-}
-
 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
 {
 	struct btrfs_root *root;
@@ -3962,7 +3996,6 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
 		balance_dirty_pages_ratelimited(
				   root->fs_info->btree_inode->i_mapping);
 	}
-	return;
 }
 
 void btrfs_btree_balance_dirty(struct btrfs_root *root)
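Buried in the disk-io.c hunks above is a one-line leak fix: the superblock-checksum error path jumped to fail_alloc without dropping the buffer_head reference. The general shape of the bug and fix, as a generic sketch (checksum_is_bad() and the read call's context are placeholders, not the btrfs code):

```c
bh = sb_bread(sb, block);		/* takes a reference on the buffer */
if (!bh)
	goto fail_alloc;
if (checksum_is_bad(bh->b_data)) {
	err = -EINVAL;
	brelse(bh);	/* the fix: the fail_alloc label never sees bh */
	goto fail_alloc;
}
```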
fs/btrfs/disk-io.h
@@ -19,7 +19,7 @@
 #ifndef __DISKIO__
 #define __DISKIO__
 
-#define BTRFS_SUPER_INFO_OFFSET (64 * 1024)
+#define BTRFS_SUPER_INFO_OFFSET SZ_64K
 #define BTRFS_SUPER_INFO_SIZE 4096
 
 #define BTRFS_SUPER_MIRROR_MAX	 3
@@ -35,7 +35,7 @@ enum btrfs_wq_endio_type {
 
 static inline u64 btrfs_sb_offset(int mirror)
 {
-	u64 start = 16 * 1024;
+	u64 start = SZ_16K;
 	if (mirror)
 		return start << (BTRFS_SUPER_MIRROR_SHIFT * mirror);
 	return BTRFS_SUPER_INFO_OFFSET;
@@ -116,7 +116,6 @@ static inline void btrfs_put_fs_root(struct btrfs_root *root)
 void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
			  int atomic);
-int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
 u32 btrfs_csum_data(char *data, u32 seed, size_t len);
 void btrfs_csum_final(u32 crc, char *result);
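The btrfs_sb_offset() hunk only swaps a literal for SZ_16K, but the formula is worth working through. Assuming BTRFS_SUPER_MIRROR_SHIFT is 12 (its long-standing value in this header, stated here from memory), the three superblock copies land at 64 KiB, 64 MiB, and 256 GiB:

```c
#include <stdio.h>

#define SZ_16K			 (16 * 1024)
#define SZ_64K			 (64 * 1024)
#define BTRFS_SUPER_INFO_OFFSET	 SZ_64K
#define BTRFS_SUPER_MIRROR_SHIFT 12	/* assumption noted in the lead-in */

static unsigned long long btrfs_sb_offset(int mirror)
{
	unsigned long long start = SZ_16K;

	if (mirror)
		return start << (BTRFS_SUPER_MIRROR_SHIFT * mirror);
	return BTRFS_SUPER_INFO_OFFSET;
}

int main(void)
{
	/* 65536 (64 KiB), 67108864 (64 MiB), 274877906944 (256 GiB) */
	for (int m = 0; m < 3; m++)
		printf("superblock mirror %d at byte %llu\n",
		       m, btrfs_sb_offset(m));
	return 0;
}
```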
@ -33,6 +33,7 @@
|
|||||||
#include "raid56.h"
|
#include "raid56.h"
|
||||||
#include "locking.h"
|
#include "locking.h"
|
||||||
#include "free-space-cache.h"
|
#include "free-space-cache.h"
|
||||||
|
#include "free-space-tree.h"
|
||||||
#include "math.h"
|
#include "math.h"
|
||||||
#include "sysfs.h"
|
#include "sysfs.h"
|
||||||
#include "qgroup.h"
|
#include "qgroup.h"
|
||||||
@ -357,8 +358,8 @@ static void fragment_free_space(struct btrfs_root *root,
|
|||||||
* we need to check the pinned_extents for any extents that can't be used yet
|
* we need to check the pinned_extents for any extents that can't be used yet
|
||||||
* since their free space will be released as soon as the transaction commits.
|
* since their free space will be released as soon as the transaction commits.
|
||||||
*/
|
*/
|
||||||
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
|
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
|
||||||
struct btrfs_fs_info *info, u64 start, u64 end)
|
struct btrfs_fs_info *info, u64 start, u64 end)
|
||||||
{
|
{
|
||||||
u64 extent_start, extent_end, size, total_added = 0;
|
u64 extent_start, extent_end, size, total_added = 0;
|
||||||
int ret;
|
int ret;
|
||||||
@ -395,11 +396,10 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
|
|||||||
return total_added;
|
return total_added;
|
||||||
}
|
}
|
||||||
|
|
||||||
static noinline void caching_thread(struct btrfs_work *work)
|
static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
|
||||||
{
|
{
|
||||||
struct btrfs_block_group_cache *block_group;
|
struct btrfs_block_group_cache *block_group;
|
||||||
struct btrfs_fs_info *fs_info;
|
struct btrfs_fs_info *fs_info;
|
||||||
struct btrfs_caching_control *caching_ctl;
|
|
||||||
struct btrfs_root *extent_root;
|
struct btrfs_root *extent_root;
|
||||||
struct btrfs_path *path;
|
struct btrfs_path *path;
|
||||||
struct extent_buffer *leaf;
|
struct extent_buffer *leaf;
|
||||||
@ -407,17 +407,16 @@ static noinline void caching_thread(struct btrfs_work *work)
|
|||||||
u64 total_found = 0;
|
u64 total_found = 0;
|
||||||
u64 last = 0;
|
u64 last = 0;
|
||||||
u32 nritems;
|
u32 nritems;
|
||||||
int ret = -ENOMEM;
|
int ret;
|
||||||
bool wakeup = true;
|
bool wakeup = true;
|
||||||
|
|
||||||
caching_ctl = container_of(work, struct btrfs_caching_control, work);
|
|
||||||
block_group = caching_ctl->block_group;
|
block_group = caching_ctl->block_group;
|
||||||
fs_info = block_group->fs_info;
|
fs_info = block_group->fs_info;
|
||||||
extent_root = fs_info->extent_root;
|
extent_root = fs_info->extent_root;
|
||||||
|
|
||||||
path = btrfs_alloc_path();
|
path = btrfs_alloc_path();
|
||||||
if (!path)
|
if (!path)
|
||||||
goto out;
|
return -ENOMEM;
|
||||||
|
|
||||||
last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
|
last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
|
||||||
|
|
||||||
@ -438,20 +437,16 @@ static noinline void caching_thread(struct btrfs_work *work)
|
|||||||
*/
|
*/
|
||||||
path->skip_locking = 1;
|
path->skip_locking = 1;
|
||||||
path->search_commit_root = 1;
|
path->search_commit_root = 1;
|
||||||
path->reada = 1;
|
path->reada = READA_FORWARD;
|
||||||
|
|
||||||
key.objectid = last;
|
key.objectid = last;
|
||||||
key.offset = 0;
|
key.offset = 0;
|
||||||
key.type = BTRFS_EXTENT_ITEM_KEY;
|
key.type = BTRFS_EXTENT_ITEM_KEY;
|
||||||
again:
|
|
||||||
mutex_lock(&caching_ctl->mutex);
|
|
||||||
/* need to make sure the commit_root doesn't disappear */
|
|
||||||
down_read(&fs_info->commit_root_sem);
|
|
||||||
|
|
||||||
next:
|
next:
|
||||||
ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
|
ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto err;
|
goto out;
|
||||||
|
|
||||||
leaf = path->nodes[0];
|
leaf = path->nodes[0];
|
||||||
nritems = btrfs_header_nritems(leaf);
|
nritems = btrfs_header_nritems(leaf);
|
||||||
@@ -477,12 +472,14 @@ next:
 			up_read(&fs_info->commit_root_sem);
 			mutex_unlock(&caching_ctl->mutex);
 			cond_resched();
-			goto again;
+			mutex_lock(&caching_ctl->mutex);
+			down_read(&fs_info->commit_root_sem);
+			goto next;
 		}
 
 		ret = btrfs_next_leaf(extent_root, path);
 		if (ret < 0)
-			goto err;
+			goto out;
 		if (ret)
 			break;
 		leaf = path->nodes[0];
@@ -521,7 +518,7 @@ next:
 		else
 			last = key.objectid + key.offset;
 
-		if (total_found > (1024 * 1024 * 2)) {
+		if (total_found > CACHING_CTL_WAKE_UP) {
 			total_found = 0;
 			if (wakeup)
 				wake_up(&caching_ctl->wait);
@@ -534,9 +531,37 @@ next:
 	total_found += add_new_free_space(block_group, fs_info, last,
 					  block_group->key.objectid +
 					  block_group->key.offset);
+	caching_ctl->progress = (u64)-1;
 
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+static noinline void caching_thread(struct btrfs_work *work)
+{
+	struct btrfs_block_group_cache *block_group;
+	struct btrfs_fs_info *fs_info;
+	struct btrfs_caching_control *caching_ctl;
+	struct btrfs_root *extent_root;
+	int ret;
+
+	caching_ctl = container_of(work, struct btrfs_caching_control, work);
+	block_group = caching_ctl->block_group;
+	fs_info = block_group->fs_info;
+	extent_root = fs_info->extent_root;
+
+	mutex_lock(&caching_ctl->mutex);
+	down_read(&fs_info->commit_root_sem);
+
+	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
+		ret = load_free_space_tree(caching_ctl);
+	else
+		ret = load_extent_tree_free(caching_ctl);
+
 	spin_lock(&block_group->lock);
 	block_group->caching_ctl = NULL;
-	block_group->cached = BTRFS_CACHE_FINISHED;
+	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
 	spin_unlock(&block_group->lock);
 
 #ifdef CONFIG_BTRFS_DEBUG
@@ -555,20 +580,11 @@ next:
 #endif
 
 	caching_ctl->progress = (u64)-1;
-err:
-	btrfs_free_path(path);
 	up_read(&fs_info->commit_root_sem);
-
-	free_excluded_extents(extent_root, block_group);
-
+	free_excluded_extents(fs_info->extent_root, block_group);
 	mutex_unlock(&caching_ctl->mutex);
-out:
-	if (ret) {
-		spin_lock(&block_group->lock);
-		block_group->caching_ctl = NULL;
-		block_group->cached = BTRFS_CACHE_ERROR;
-		spin_unlock(&block_group->lock);
-	}
+
 	wake_up(&caching_ctl->wait);
 
 	put_caching_control(caching_ctl);
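
Aside (editorial sketch, not part of the patch): the refactor above splits the old caching_thread() in two. load_extent_tree_free() keeps the extent-tree walk, and a new, thin caching_thread() takes the locks, dispatches to load_free_space_tree() when the FREE_SPACE_TREE read-only compat bit is set, and folds the loader's return code into the cache state. A minimal user-space model of that dispatch (stub names are illustrative; only the control flow mirrors the diff):

#include <stdbool.h>
#include <stdio.h>

static int load_free_space_tree_stub(void)  { return 0; }
static int load_extent_tree_free_stub(void) { return 0; }

int main(void)
{
	/* stands in for btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) */
	bool have_free_space_tree = true;
	int ret;

	ret = have_free_space_tree ? load_free_space_tree_stub()
				   : load_extent_tree_free_stub();

	/* BTRFS_CACHE_ERROR on failure, BTRFS_CACHE_FINISHED otherwise,
	 * exactly as the hunk sets block_group->cached */
	printf("cached = %s\n",
	       ret ? "BTRFS_CACHE_ERROR" : "BTRFS_CACHE_FINISHED");
	return 0;
}
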
@@ -680,8 +696,8 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 		}
 	} else {
 		/*
-		 * We are not going to do the fast caching, set cached to the
-		 * appropriate value and wakeup any waiters.
+		 * We're either using the free space tree or no caching at all.
+		 * Set cached to the appropriate value and wakeup any waiters.
 		 */
 		spin_lock(&cache->lock);
 		if (load_cache_only) {
@@ -2115,7 +2131,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
 	if (!path)
 		return -ENOMEM;
 
-	path->reada = 1;
+	path->reada = READA_FORWARD;
 	path->leave_spinning = 1;
 	/* this will setup the path even if it fails to insert the back ref */
 	ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
@@ -2141,7 +2157,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
 	btrfs_mark_buffer_dirty(leaf);
 	btrfs_release_path(path);
 
-	path->reada = 1;
+	path->reada = READA_FORWARD;
 	path->leave_spinning = 1;
 	/* now insert the actual backref */
 	ret = insert_extent_backref(trans, root->fs_info->extent_root,
@@ -2254,7 +2270,7 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
 	}
 
 again:
-	path->reada = 1;
+	path->reada = READA_FORWARD;
 	path->leave_spinning = 1;
 	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
 				path, 0, 1);
@@ -2910,6 +2926,9 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 	if (trans->aborted)
 		return 0;
 
+	if (root->fs_info->creating_free_space_tree)
+		return 0;
+
 	if (root == root->fs_info->extent_root)
 		root = root->fs_info->tree_root;
 
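
Aside (editorial sketch, not part of the patch): the early return added above makes btrfs_run_delayed_refs() a no-op while creating_free_space_tree is set, so queued refs simply stay queued during tree construction. The diff shows only the guard; the "why" (keeping the trees quiescent while they are being walked to build the new one) is an inference, flagged as such. A toy model of such a deferral flag:

#include <stdbool.h>
#include <stdio.h>

static bool creating_free_space_tree; /* flag name mirrors the patch */
static int queued_refs = 5;           /* everything else is a toy */

static int run_delayed_refs(void)
{
	if (creating_free_space_tree)
		return 0;	/* leave the refs queued for later */
	queued_refs = 0;	/* process everything */
	return 0;
}

int main(void)
{
	creating_free_space_tree = true;
	run_delayed_refs();
	printf("still queued: %d\n", queued_refs);	/* 5 */

	creating_free_space_tree = false;
	run_delayed_refs();
	printf("still queued: %d\n", queued_refs);	/* 0 */
	return 0;
}
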
@@ -2988,9 +3007,9 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
 		return -ENOMEM;
 
 	extent_op->flags_to_set = flags;
-	extent_op->update_flags = 1;
-	extent_op->update_key = 0;
-	extent_op->is_data = is_data ? 1 : 0;
+	extent_op->update_flags = true;
+	extent_op->update_key = false;
+	extent_op->is_data = is_data ? true : false;
 	extent_op->level = level;
 
 	ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
@@ -3328,7 +3347,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
 	 * If this block group is smaller than 100 megs don't bother caching the
 	 * block group.
 	 */
-	if (block_group->key.offset < (100 * 1024 * 1024)) {
+	if (block_group->key.offset < (100 * SZ_1M)) {
 		spin_lock(&block_group->lock);
 		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
 		spin_unlock(&block_group->lock);
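
Aside (editorial note, not part of the patch): the SZ_* names used in these size conversions come from linux/sizes.h ("Btrfs: use linux/sizes.h to represent constants" in this pull). The values are the usual powers of two; the definitions below are reproduced from memory for reference and should be checked against include/linux/sizes.h:

#include <stdio.h>

#define SZ_64K  0x00010000
#define SZ_256K 0x00040000
#define SZ_1M   0x00100000
#define SZ_2M   0x00200000
#define SZ_16M  0x01000000
#define SZ_256M 0x10000000
#define SZ_512M 0x20000000

int main(void)
{
	/* e.g. the 100 MiB threshold in cache_save_setup() above */
	unsigned long long thresh = 100ULL * SZ_1M;

	printf("100 * SZ_1M = %llu bytes\n", thresh);
	return 0;
}
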
@@ -3428,7 +3447,7 @@ again:
 	 * taking up quite a bit since it's not folded into the other space
 	 * cache.
 	 */
-	num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
+	num_pages = div_u64(block_group->key.offset, SZ_256M);
 	if (!num_pages)
 		num_pages = 1;
 
@@ -3684,11 +3703,21 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 		return -ENOMEM;
 
 	/*
-	 * We don't need the lock here since we are protected by the transaction
-	 * commit. We want to do the cache_save_setup first and then run the
+	 * Even though we are in the critical section of the transaction commit,
+	 * we can still have concurrent tasks adding elements to this
+	 * transaction's list of dirty block groups. These tasks correspond to
+	 * endio free space workers started when writeback finishes for a
+	 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
+	 * allocate new block groups as a result of COWing nodes of the root
+	 * tree when updating the free space inode. The writeback for the space
+	 * caches is triggered by an earlier call to
+	 * btrfs_start_dirty_block_groups() and iterations of the following
+	 * loop.
+	 * Also we want to do the cache_save_setup first and then run the
 	 * delayed refs to make sure we have the best chance at doing this all
 	 * in one shot.
 	 */
+	spin_lock(&cur_trans->dirty_bgs_lock);
 	while (!list_empty(&cur_trans->dirty_bgs)) {
 		cache = list_first_entry(&cur_trans->dirty_bgs,
 					 struct btrfs_block_group_cache,
@@ -3700,11 +3729,13 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 		 * finish and then do it all again
 		 */
 		if (!list_empty(&cache->io_list)) {
+			spin_unlock(&cur_trans->dirty_bgs_lock);
 			list_del_init(&cache->io_list);
 			btrfs_wait_cache_io(root, trans, cache,
 					    &cache->io_ctl, path,
 					    cache->key.objectid);
 			btrfs_put_block_group(cache);
+			spin_lock(&cur_trans->dirty_bgs_lock);
 		}
 
 		/*
@@ -3712,6 +3743,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 		 * on any pending IO
 		 */
 		list_del_init(&cache->dirty_list);
+		spin_unlock(&cur_trans->dirty_bgs_lock);
 		should_put = 1;
 
 		cache_save_setup(cache, trans, path);
@@ -3736,6 +3768,25 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 		}
 		if (!ret) {
 			ret = write_one_cache_group(trans, root, path, cache);
+			/*
+			 * One of the free space endio workers might have
+			 * created a new block group while updating a free space
+			 * cache's inode (at inode.c:btrfs_finish_ordered_io())
+			 * and hasn't released its transaction handle yet, in
+			 * which case the new block group is still attached to
+			 * its transaction handle and its creation has not
+			 * finished yet (no block group item in the extent tree
+			 * yet, etc). If this is the case, wait for all free
+			 * space endio workers to finish and retry. This is a
+			 * a very rare case so no need for a more efficient and
+			 * complex approach.
+			 */
+			if (ret == -ENOENT) {
+				wait_event(cur_trans->writer_wait,
+				   atomic_read(&cur_trans->num_writers) == 1);
+				ret = write_one_cache_group(trans, root, path,
+							    cache);
+			}
 			if (ret)
 				btrfs_abort_transaction(trans, root, ret);
 		}
@@ -3743,7 +3794,9 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 		/* if its not on the io list, we need to put the block group */
 		if (should_put)
 			btrfs_put_block_group(cache);
+		spin_lock(&cur_trans->dirty_bgs_lock);
 	}
+	spin_unlock(&cur_trans->dirty_bgs_lock);
 
 	while (!list_empty(io)) {
 		cache = list_first_entry(io, struct btrfs_block_group_cache,
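
Aside (editorial sketch, not part of the patch): the hunks above put the walk of cur_trans->dirty_bgs under dirty_bgs_lock, dropping the spinlock across anything that can block (btrfs_wait_cache_io(), the per-group writeout) and retaking it before touching the list again. The same shape in plain user-space C, with a pthread mutex standing in for the kernel spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int remaining = 3;	/* stand-in for the dirty_bgs list */

static void blocking_work(int item)
{
	/* the kernel analogue may sleep, so no spinlock may be held */
	printf("writing out item %d\n", item);
}

int main(void)
{
	pthread_mutex_lock(&list_lock);
	while (remaining > 0) {
		int item = remaining--;	/* list_first_entry() + del */

		pthread_mutex_unlock(&list_lock);
		blocking_work(item);
		pthread_mutex_lock(&list_lock);	/* retake before re-reading */
	}
	pthread_mutex_unlock(&list_lock);
	return 0;
}
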
@@ -4239,14 +4292,13 @@ static int should_alloc_chunk(struct btrfs_root *root,
 	 */
 	if (force == CHUNK_ALLOC_LIMITED) {
 		thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
-		thresh = max_t(u64, 64 * 1024 * 1024,
-			       div_factor_fine(thresh, 1));
+		thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
 
 		if (num_bytes - num_allocated < thresh)
 			return 1;
 	}
 
-	if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
+	if (num_allocated + SZ_2M < div_factor(num_bytes, 8))
 		return 0;
 	return 1;
 }
@@ -4446,7 +4498,7 @@ out:
 	 * transaction.
 	 */
 	if (trans->can_flush_pending_bgs &&
-	    trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
+	    trans->chunk_bytes_reserved >= (u64)SZ_2M) {
 		btrfs_create_pending_block_groups(trans, trans->root);
 		btrfs_trans_release_chunk_metadata(trans);
 	}
@@ -4544,7 +4596,7 @@ static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
 	return nr;
 }
 
-#define EXTENT_SIZE_PER_ITEM	(256 * 1024)
+#define EXTENT_SIZE_PER_ITEM	SZ_256K
 
 /*
  * shrink metadata reservation for delalloc
@@ -4749,8 +4801,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
 	u64 expected;
 	u64 to_reclaim;
 
-	to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
-			   16 * 1024 * 1024);
+	to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
 	spin_lock(&space_info->lock);
 	if (can_overcommit(root, space_info, to_reclaim,
 			   BTRFS_RESERVE_FLUSH_ALL)) {
@@ -4761,8 +4812,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
 	used = space_info->bytes_used + space_info->bytes_reserved +
 	       space_info->bytes_pinned + space_info->bytes_readonly +
 	       space_info->bytes_may_use;
-	if (can_overcommit(root, space_info, 1024 * 1024,
-			   BTRFS_RESERVE_FLUSH_ALL))
+	if (can_overcommit(root, space_info, SZ_1M, BTRFS_RESERVE_FLUSH_ALL))
 		expected = div_factor_fine(space_info->total_bytes, 95);
 	else
 		expected = div_factor_fine(space_info->total_bytes, 90);
@@ -5318,7 +5368,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
 	spin_lock(&sinfo->lock);
 	spin_lock(&block_rsv->lock);
 
-	block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
+	block_rsv->size = min_t(u64, num_bytes, SZ_512M);
 
 	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
 		    sinfo->bytes_reserved + sinfo->bytes_readonly +
@@ -6222,11 +6272,11 @@ fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
 		return ret;
 
 	if (ssd)
-		*empty_cluster = 2 * 1024 * 1024;
+		*empty_cluster = SZ_2M;
 	if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
 		ret = &root->fs_info->meta_alloc_cluster;
 		if (!ssd)
-			*empty_cluster = 64 * 1024;
+			*empty_cluster = SZ_64K;
 	} else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && ssd) {
 		ret = &root->fs_info->data_alloc_cluster;
 	}
@@ -6438,7 +6488,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 	if (!path)
 		return -ENOMEM;
 
-	path->reada = 1;
+	path->reada = READA_FORWARD;
 	path->leave_spinning = 1;
 
 	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
@@ -6661,6 +6711,13 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 		}
 	}
 
+	ret = add_to_free_space_tree(trans, root->fs_info, bytenr,
+				     num_bytes);
+	if (ret) {
+		btrfs_abort_transaction(trans, extent_root, ret);
+		goto out;
+	}
+
 	ret = update_block_group(trans, root, bytenr, num_bytes, 0);
 	if (ret) {
 		btrfs_abort_transaction(trans, extent_root, ret);
@@ -7672,6 +7729,11 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
 	btrfs_mark_buffer_dirty(path->nodes[0]);
 	btrfs_free_path(path);
 
+	ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
+					  ins->offset);
+	if (ret)
+		return ret;
+
 	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
 	if (ret) { /* -ENOENT, logic error */
 		btrfs_err(fs_info, "update block group failed for %llu %llu",
@@ -7752,6 +7814,11 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
 	btrfs_mark_buffer_dirty(leaf);
 	btrfs_free_path(path);
 
+	ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
+					  num_bytes);
+	if (ret)
+		return ret;
+
 	ret = update_block_group(trans, root, ins->objectid, root->nodesize,
 				 1);
 	if (ret) { /* -ENOENT, logic error */
@@ -7834,7 +7901,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
 
 	btrfs_set_lock_blocking(buf);
-	btrfs_set_buffer_uptodate(buf);
+	set_extent_buffer_uptodate(buf);
 
 	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
 		buf->log_index = root->log_transid % 2;
@@ -7980,12 +8047,9 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
 		else
 			memset(&extent_op->key, 0, sizeof(extent_op->key));
 		extent_op->flags_to_set = flags;
-		if (skinny_metadata)
-			extent_op->update_key = 0;
-		else
-			extent_op->update_key = 1;
-		extent_op->update_flags = 1;
-		extent_op->is_data = 0;
+		extent_op->update_key = skinny_metadata ? false : true;
+		extent_op->update_flags = true;
+		extent_op->is_data = false;
 		extent_op->level = level;
 
 		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
@@ -9124,7 +9188,7 @@ static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
 	if ((sinfo->flags &
 	     (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
 	    !force)
-		min_allocable_bytes = 1 * 1024 * 1024;
+		min_allocable_bytes = SZ_1M;
 	else
 		min_allocable_bytes = 0;
 
@@ -9656,6 +9720,8 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
 	cache->full_stripe_len = btrfs_full_stripe_len(root,
 					       &root->fs_info->mapping_tree,
 					       start);
+	set_free_space_tree_thresholds(cache);
+
 	atomic_set(&cache->count, 1);
 	spin_lock_init(&cache->lock);
 	init_rwsem(&cache->data_rwsem);
@@ -9667,6 +9733,7 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
 	INIT_LIST_HEAD(&cache->io_list);
 	btrfs_init_free_space_ctl(cache);
 	atomic_set(&cache->trimming, 0);
+	mutex_init(&cache->free_space_lock);
 
 	return cache;
 }
@@ -9691,7 +9758,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
-	path->reada = 1;
+	path->reada = READA_FORWARD;
 
 	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
 	if (btrfs_test_opt(root, SPACE_CACHE) &&
@@ -9877,6 +9944,8 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
 					       key.objectid, key.offset);
 		if (ret)
 			btrfs_abort_transaction(trans, extent_root, ret);
+		add_block_group_free_space(trans, root->fs_info, block_group);
+		/* already aborted the transaction if it failed. */
 next:
 		list_del_init(&block_group->bg_list);
 	}
@@ -9907,6 +9976,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	cache->flags = type;
 	cache->last_byte_to_unpin = (u64)-1;
 	cache->cached = BTRFS_CACHE_FINISHED;
+	cache->needs_free_space = 1;
 	ret = exclude_super_stripes(root, cache);
 	if (ret) {
 		/*
@@ -10277,6 +10347,10 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 
 	unlock_chunks(root);
 
+	ret = remove_block_group_free_space(trans, root->fs_info, block_group);
+	if (ret)
+		goto out;
+
 	btrfs_put_block_group(block_group);
 	btrfs_put_block_group(block_group);
 
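
Aside (editorial sketch, not part of the patch): taken together, the extent-tree hunks above wire the new free space tree into every allocation-state transition visible in this diff: remove_from_free_space_tree() when an extent is handed out (alloc_reserved_file_extent(), alloc_reserved_tree_block()), add_to_free_space_tree() when one is released (__btrfs_free_extent()), and add/remove_block_group_free_space() when block groups are created or deleted. A toy model of the invariant those paired hooks maintain:

#include <stdio.h>

/* Illustration of the pairing only; the real structure is a btree of
 * free extents/bitmaps, not a counter. */
static unsigned long long free_bytes = 1024 * 1024;

static int remove_from_free_space(unsigned long long len)
{
	if (len > free_bytes)
		return -1;	/* accounting would go negative */
	free_bytes -= len;	/* allocation side */
	return 0;
}

static void add_to_free_space(unsigned long long len)
{
	free_bytes += len;	/* free side */
}

int main(void)
{
	remove_from_free_space(4096);	/* extent allocated */
	add_to_free_space(4096);	/* extent freed */
	printf("free: %llu\n", free_bytes);	/* back to the start */
	return 0;
}
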
@@ -1285,20 +1285,6 @@ search_again:
 }
 
 /* wrappers around set/clear extent bit */
-int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
-		     gfp_t mask)
-{
-	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
-			      NULL, mask);
-}
-
-int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		    unsigned bits, gfp_t mask)
-{
-	return set_extent_bit(tree, start, end, bits, NULL,
-			      NULL, mask);
-}
-
 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 			   unsigned bits, gfp_t mask,
 			   struct extent_changeset *changeset)
@@ -1323,17 +1309,6 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 				cached, mask, NULL);
 }
 
-int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		      unsigned bits, gfp_t mask)
-{
-	int wake = 0;
-
-	if (bits & EXTENT_LOCKED)
-		wake = 1;
-
-	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL, mask);
-}
-
 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 			     unsigned bits, gfp_t mask,
 			     struct extent_changeset *changeset)
@@ -1348,63 +1323,18 @@ int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 				changeset);
 }
 
-int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
-			struct extent_state **cached_state, gfp_t mask)
-{
-	return set_extent_bit(tree, start, end,
-			      EXTENT_DELALLOC | EXTENT_UPTODATE,
-			      NULL, cached_state, mask);
-}
-
-int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
-		      struct extent_state **cached_state, gfp_t mask)
-{
-	return set_extent_bit(tree, start, end,
-			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
-			      NULL, cached_state, mask);
-}
-
-int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
-		       gfp_t mask)
-{
-	return clear_extent_bit(tree, start, end,
-				EXTENT_DIRTY | EXTENT_DELALLOC |
-				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
-}
-
-int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
-		   gfp_t mask)
-{
-	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
-			      NULL, mask);
-}
-
-int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
-			struct extent_state **cached_state, gfp_t mask)
-{
-	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
-			      cached_state, mask);
-}
-
-int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
-			  struct extent_state **cached_state, gfp_t mask)
-{
-	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
-				cached_state, mask);
-}
-
 /*
  * either insert or lock state struct between start and end use mask to tell
  * us if waiting is desired.
 */
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		     unsigned bits, struct extent_state **cached_state)
+		     struct extent_state **cached_state)
 {
 	int err;
 	u64 failed_start;
 
 	while (1) {
-		err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
+		err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
 				       EXTENT_LOCKED, &failed_start,
 				       cached_state, GFP_NOFS, NULL);
 		if (err == -EEXIST) {
@@ -1417,11 +1347,6 @@ int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 	return err;
 }
 
-int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
-{
-	return lock_extent_bits(tree, start, end, 0, NULL);
-}
-
 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
 {
 	int err;
@@ -1438,20 +1363,7 @@ int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
 	return 1;
 }
 
-int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
-			 struct extent_state **cached, gfp_t mask)
-{
-	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
-				mask);
-}
-
-int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
-{
-	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
-				GFP_NOFS);
-}
-
-int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
+void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
 {
 	unsigned long index = start >> PAGE_CACHE_SHIFT;
 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
@@ -1464,10 +1376,9 @@ int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
 		page_cache_release(page);
 		index++;
 	}
-	return 0;
 }
 
-int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
+void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
 {
 	unsigned long index = start >> PAGE_CACHE_SHIFT;
 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
@@ -1481,13 +1392,12 @@ int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
 		page_cache_release(page);
 		index++;
 	}
-	return 0;
 }
 
 /*
  * helper function to set both pages and extents in the tree writeback
 */
-static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
+static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
 {
 	unsigned long index = start >> PAGE_CACHE_SHIFT;
 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
@@ -1500,7 +1410,6 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
 		page_cache_release(page);
 		index++;
 	}
-	return 0;
 }
 
 /* find the first state struct with 'bits' set after 'start', and
@@ -1800,7 +1709,7 @@ again:
 	BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
 
 	/* step three, lock the state bits for the whole range */
-	lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
+	lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
 
 	/* then test to make sure it is all still delalloc */
 	ret = test_range_bit(tree, delalloc_start, delalloc_end,
@@ -1820,7 +1729,7 @@ out_failed:
 	return found;
 }
 
-int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
+void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
 				 struct page *locked_page,
 				 unsigned clear_bits,
 				 unsigned long page_ops)
@@ -1835,7 +1744,7 @@ int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
 
 	clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
 	if (page_ops == 0)
-		return 0;
+		return;
 
 	if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
 		mapping_set_error(inode->i_mapping, -EIO);
@@ -1869,7 +1778,6 @@ int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
 		index += ret;
 		cond_resched();
 	}
-	return 0;
 }
 
 /*
@@ -2516,7 +2424,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 
 /* lots and lots of room for performance fixes in the end_bio funcs */
 
-int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
+void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
 {
 	int uptodate = (err == 0);
 	struct extent_io_tree *tree;
@@ -2537,7 +2445,6 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
 		ret = ret < 0 ? ret : -EIO;
 		mapping_set_error(page->mapping, ret);
 	}
-	return 0;
 }
 
 /*
@@ -2579,9 +2486,7 @@ static void end_bio_extent_writepage(struct bio *bio)
 		start = page_offset(page);
 		end = start + bvec->bv_offset + bvec->bv_len - 1;
 
-		if (end_extent_writepage(page, bio->bi_error, start, end))
-			continue;
-
+		end_extent_writepage(page, bio->bi_error, start, end);
 		end_page_writeback(page);
 	}
 
@@ -4326,7 +4231,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
 	if (start > end)
 		return 0;
 
-	lock_extent_bits(tree, start, end, 0, &cached_state);
+	lock_extent_bits(tree, start, end, &cached_state);
 	wait_on_page_writeback(page);
 	clear_extent_bit(tree, start, end,
 			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
@@ -4387,7 +4292,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 	u64 end = start + PAGE_CACHE_SIZE - 1;
 
 	if (gfpflags_allow_blocking(mask) &&
-	    page->mapping->host->i_size > 16 * 1024 * 1024) {
+	    page->mapping->host->i_size > SZ_16M) {
 		u64 len;
 		while (start <= end) {
 			len = end - start + 1;
@@ -4536,7 +4441,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		last_for_get_extent = isize;
 	}
 
-	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0,
+	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1,
 			 &cached_state);
 
 	em = get_extent_skip_holes(inode, start, last_for_get_extent,
@@ -4797,24 +4702,14 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
 	return new;
 }
 
-struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
-						u64 start)
+struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
+						  u64 start, unsigned long len)
 {
 	struct extent_buffer *eb;
-	unsigned long len;
 	unsigned long num_pages;
 	unsigned long i;
 
-	if (!fs_info) {
-		/*
-		 * Called only from tests that don't always have a fs_info
-		 * available, but we know that nodesize is 4096
-		 */
-		len = 4096;
-	} else {
-		len = fs_info->tree_root->nodesize;
-	}
-	num_pages = num_extent_pages(0, len);
+	num_pages = num_extent_pages(start, len);
 
 	eb = __alloc_extent_buffer(fs_info, start, len);
 	if (!eb)
@@ -4837,6 +4732,24 @@ err:
 	return NULL;
 }
 
+struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
+						u64 start)
+{
+	unsigned long len;
+
+	if (!fs_info) {
+		/*
+		 * Called only from tests that don't always have a fs_info
+		 * available, but we know that nodesize is 4096
+		 */
+		len = 4096;
+	} else {
+		len = fs_info->tree_root->nodesize;
+	}
+
+	return __alloc_dummy_extent_buffer(fs_info, start, len);
+}
+
 static void check_buffer_tree_ref(struct extent_buffer *eb)
 {
 	int refs;
@@ -5227,7 +5140,7 @@ int set_extent_buffer_dirty(struct extent_buffer *eb)
 	return was_dirty;
 }
 
-int clear_extent_buffer_uptodate(struct extent_buffer *eb)
+void clear_extent_buffer_uptodate(struct extent_buffer *eb)
 {
 	unsigned long i;
 	struct page *page;
@@ -5240,10 +5153,9 @@ int clear_extent_buffer_uptodate(struct extent_buffer *eb)
 		if (page)
 			ClearPageUptodate(page);
 	}
-	return 0;
 }
 
-int set_extent_buffer_uptodate(struct extent_buffer *eb)
+void set_extent_buffer_uptodate(struct extent_buffer *eb)
 {
 	unsigned long i;
 	struct page *page;
@@ -5255,7 +5167,6 @@ int set_extent_buffer_uptodate(struct extent_buffer *eb)
 		page = eb->pages[i];
 		SetPageUptodate(page);
 	}
-	return 0;
 }
 
 int extent_buffer_uptodate(struct extent_buffer *eb)
@@ -5594,6 +5505,155 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
 	}
 }
 
+/*
+ * The extent buffer bitmap operations are done with byte granularity because
+ * bitmap items are not guaranteed to be aligned to a word and therefore a
+ * single word in a bitmap may straddle two pages in the extent buffer.
+ */
+#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
+#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
+#define BITMAP_FIRST_BYTE_MASK(start) \
+	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
+#define BITMAP_LAST_BYTE_MASK(nbits) \
+	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
+
+/*
+ * eb_bitmap_offset() - calculate the page and offset of the byte containing the
+ * given bit number
+ * @eb: the extent buffer
+ * @start: offset of the bitmap item in the extent buffer
+ * @nr: bit number
+ * @page_index: return index of the page in the extent buffer that contains the
+ * given bit number
+ * @page_offset: return offset into the page given by page_index
+ *
+ * This helper hides the ugliness of finding the byte in an extent buffer which
+ * contains a given bit.
+ */
+static inline void eb_bitmap_offset(struct extent_buffer *eb,
+				    unsigned long start, unsigned long nr,
+				    unsigned long *page_index,
+				    size_t *page_offset)
+{
+	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
+	size_t byte_offset = BIT_BYTE(nr);
+	size_t offset;
+
+	/*
+	 * The byte we want is the offset of the extent buffer + the offset of
+	 * the bitmap item in the extent buffer + the offset of the byte in the
+	 * bitmap item.
+	 */
+	offset = start_offset + start + byte_offset;
+
+	*page_index = offset >> PAGE_CACHE_SHIFT;
+	*page_offset = offset & (PAGE_CACHE_SIZE - 1);
+}
+
+/**
+ * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
+ * @eb: the extent buffer
+ * @start: offset of the bitmap item in the extent buffer
+ * @nr: bit number to test
+ */
+int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
+			   unsigned long nr)
+{
+	char *kaddr;
+	struct page *page;
+	unsigned long i;
+	size_t offset;
+
+	eb_bitmap_offset(eb, start, nr, &i, &offset);
+	page = eb->pages[i];
+	WARN_ON(!PageUptodate(page));
+	kaddr = page_address(page);
+	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
+}
+
+/**
+ * extent_buffer_bitmap_set - set an area of a bitmap
+ * @eb: the extent buffer
+ * @start: offset of the bitmap item in the extent buffer
+ * @pos: bit number of the first bit
+ * @len: number of bits to set
+ */
+void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
+			      unsigned long pos, unsigned long len)
+{
+	char *kaddr;
+	struct page *page;
+	unsigned long i;
+	size_t offset;
+	const unsigned int size = pos + len;
+	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
+	unsigned int mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
+
+	eb_bitmap_offset(eb, start, pos, &i, &offset);
+	page = eb->pages[i];
+	WARN_ON(!PageUptodate(page));
+	kaddr = page_address(page);
+
+	while (len >= bits_to_set) {
+		kaddr[offset] |= mask_to_set;
+		len -= bits_to_set;
+		bits_to_set = BITS_PER_BYTE;
+		mask_to_set = ~0U;
+		if (++offset >= PAGE_CACHE_SIZE && len > 0) {
+			offset = 0;
+			page = eb->pages[++i];
+			WARN_ON(!PageUptodate(page));
+			kaddr = page_address(page);
+		}
+	}
+	if (len) {
+		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
+		kaddr[offset] |= mask_to_set;
+	}
+}
+
+
+/**
+ * extent_buffer_bitmap_clear - clear an area of a bitmap
+ * @eb: the extent buffer
+ * @start: offset of the bitmap item in the extent buffer
+ * @pos: bit number of the first bit
+ * @len: number of bits to clear
+ */
+void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
+				unsigned long pos, unsigned long len)
+{
+	char *kaddr;
+	struct page *page;
+	unsigned long i;
+	size_t offset;
+	const unsigned int size = pos + len;
+	int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
+	unsigned int mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
+
+	eb_bitmap_offset(eb, start, pos, &i, &offset);
+	page = eb->pages[i];
+	WARN_ON(!PageUptodate(page));
+	kaddr = page_address(page);
+
+	while (len >= bits_to_clear) {
+		kaddr[offset] &= ~mask_to_clear;
+		len -= bits_to_clear;
+		bits_to_clear = BITS_PER_BYTE;
+		mask_to_clear = ~0U;
+		if (++offset >= PAGE_CACHE_SIZE && len > 0) {
+			offset = 0;
+			page = eb->pages[++i];
+			WARN_ON(!PageUptodate(page));
+			kaddr = page_address(page);
+		}
+	}
+	if (len) {
+		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
+		kaddr[offset] &= ~mask_to_clear;
+	}
+}
+
 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
 {
 	unsigned long distance = (src > dst) ? src - dst : dst - src;
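
Aside (editorial sketch, not part of the patch): the new extent_buffer_bitmap_set()/clear() helpers above deliberately work a byte at a time. As their comment says, a bitmap item need not be word aligned, and a word could straddle two pages of the extent buffer. The core first/middle/last-byte masking is easy to lift into standalone, runnable C (page-crossing logic omitted):

#include <stdio.h>
#include <string.h>

#define BITS_PER_BYTE 8
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))

/* Same fill strategy as extent_buffer_bitmap_set(): partial first
 * byte, whole middle bytes, partial last byte. */
static void bitmap_set_bytewise(unsigned char *map, unsigned long pos,
				unsigned long len)
{
	const unsigned int size = pos + len;
	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	unsigned int mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
	size_t offset = pos / BITS_PER_BYTE;

	while (len >= (unsigned long)bits_to_set) {
		map[offset] |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = BYTE_MASK;
		offset++;
	}
	if (len) {
		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
		map[offset] |= mask_to_set;
	}
}

int main(void)
{
	unsigned char map[4];

	memset(map, 0, sizeof(map));
	bitmap_set_bytewise(map, 5, 13);	/* set bits 5..17 */
	printf("%02x %02x %02x %02x\n",		/* e0 ff 03 00 */
	       map[0], map[1], map[2], map[3]);
	return 0;
}
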
@@ -199,12 +199,14 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 		struct extent_io_tree *tree, struct page *page,
 		gfp_t mask);
 int try_release_extent_buffer(struct page *page);
-int lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		     unsigned bits, struct extent_state **cached);
-int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end);
-int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
-			 struct extent_state **cached, gfp_t mask);
+		     struct extent_state **cached);
+
+static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
+{
+	return lock_extent_bits(tree, start, end, NULL);
+}
+
 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
 			  get_extent_t *get_extent, int mirror_num);
@@ -221,39 +223,105 @@ void free_extent_state(struct extent_state *state);
 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		   unsigned bits, int filled,
 		   struct extent_state *cached_state);
-int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		      unsigned bits, gfp_t mask);
 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 			     unsigned bits, gfp_t mask,
 			     struct extent_changeset *changeset);
 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		     unsigned bits, int wake, int delete,
 		     struct extent_state **cached, gfp_t mask);
-int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		    unsigned bits, gfp_t mask);
+
+static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
+{
+	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
+				GFP_NOFS);
+}
+
+static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
+		u64 end, struct extent_state **cached, gfp_t mask)
+{
+	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
+				mask);
+}
+
+static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
+		u64 end, unsigned bits, gfp_t mask)
+{
+	int wake = 0;
+
+	if (bits & EXTENT_LOCKED)
+		wake = 1;
+
+	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL, mask);
+}
+
 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 			   unsigned bits, gfp_t mask,
 			   struct extent_changeset *changeset);
 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		   unsigned bits, u64 *failed_start,
 		   struct extent_state **cached_state, gfp_t mask);
-int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
-			struct extent_state **cached_state, gfp_t mask);
-int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
-			  struct extent_state **cached_state, gfp_t mask);
-int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
-		   gfp_t mask);
-int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
-		     gfp_t mask);
-int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
-		       gfp_t mask);
+
+static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
+		u64 end, unsigned bits, gfp_t mask)
+{
+	return set_extent_bit(tree, start, end, bits, NULL, NULL, mask);
+}
+
+static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
+		u64 end, struct extent_state **cached_state, gfp_t mask)
+{
+	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
+				cached_state, mask);
+}
+
+static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
+		u64 end, gfp_t mask)
+{
+	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
+			      NULL, mask);
+}
+
+static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
+		u64 end, gfp_t mask)
+{
+	return clear_extent_bit(tree, start, end,
+				EXTENT_DIRTY | EXTENT_DELALLOC |
+				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
+}
+
 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		       unsigned bits, unsigned clear_bits,
 		       struct extent_state **cached_state, gfp_t mask);
-int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
-			struct extent_state **cached_state, gfp_t mask);
-int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
-		      struct extent_state **cached_state, gfp_t mask);
+
+static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
+		u64 end, struct extent_state **cached_state, gfp_t mask)
+{
+	return set_extent_bit(tree, start, end,
+			      EXTENT_DELALLOC | EXTENT_UPTODATE,
+			      NULL, cached_state, mask);
+}
+
+static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
+		u64 end, struct extent_state **cached_state, gfp_t mask)
+{
+	return set_extent_bit(tree, start, end,
+			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
+			      NULL, cached_state, mask);
+}
+
+static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
+		u64 end, gfp_t mask)
+{
+	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL, mask);
+}
+
+static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
+		u64 end, struct extent_state **cached_state, gfp_t mask)
+{
+	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
+			      cached_state, mask);
+}
+
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
 			  u64 *start_ret, u64 *end_ret, unsigned bits,
 			  struct extent_state **cached_state);
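
Aside (editorial sketch, not part of the patch): the header hunk above is the other half of the earlier removals: the trivial set/clear wrappers become static inlines that all funnel into set_extent_bit()/clear_extent_bit(), so the descriptive names survive with zero call overhead. The pattern, reduced to generic standalone C (names below are stand-ins, not btrfs API):

#include <stdio.h>

static int set_bit_range(unsigned *state, unsigned bits)
{
	*state |= bits;		/* the single workhorse */
	return 0;
}

static inline int set_dirty(unsigned *state)
{
	return set_bit_range(state, 1u << 0);
}

static inline int set_uptodate(unsigned *state)
{
	return set_bit_range(state, 1u << 1);
}

int main(void)
{
	unsigned state = 0;

	set_dirty(&state);
	set_uptodate(&state);
	printf("state = 0x%x\n", state);	/* 0x3 */
	return 0;
}
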
@ -282,8 +350,10 @@ void set_page_extent_mapped(struct page *page);
|
|||||||
|
|
||||||
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
|
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
|
||||||
u64 start);
|
u64 start);
|
||||||
|
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
|
||||||
|
u64 start, unsigned long len);
|
||||||
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
|
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
|
||||||
u64 start);
|
u64 start);
|
||||||
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
|
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
|
||||||
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
|
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
|
||||||
u64 start);
|
u64 start);
|
||||||
@ -328,19 +398,25 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
|
|||||||
unsigned long src_offset, unsigned long len);
|
unsigned long src_offset, unsigned long len);
|
||||||
void memset_extent_buffer(struct extent_buffer *eb, char c,
|
void memset_extent_buffer(struct extent_buffer *eb, char c,
|
||||||
unsigned long start, unsigned long len);
|
unsigned long start, unsigned long len);
|
||||||
|
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
|
||||||
|
unsigned long pos);
|
||||||
|
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
|
||||||
|
unsigned long pos, unsigned long len);
|
||||||
|
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
|
||||||
|
unsigned long pos, unsigned long len);
|
||||||
void clear_extent_buffer_dirty(struct extent_buffer *eb);
|
void clear_extent_buffer_dirty(struct extent_buffer *eb);
|
||||||
int set_extent_buffer_dirty(struct extent_buffer *eb);
|
int set_extent_buffer_dirty(struct extent_buffer *eb);
|
||||||
int set_extent_buffer_uptodate(struct extent_buffer *eb);
|
void set_extent_buffer_uptodate(struct extent_buffer *eb);
|
||||||
int clear_extent_buffer_uptodate(struct extent_buffer *eb);
|
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
|
||||||
int extent_buffer_uptodate(struct extent_buffer *eb);
|
int extent_buffer_uptodate(struct extent_buffer *eb);
|
||||||
int extent_buffer_under_io(struct extent_buffer *eb);
|
int extent_buffer_under_io(struct extent_buffer *eb);
|
||||||
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
|
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
|
||||||
unsigned long min_len, char **map,
|
unsigned long min_len, char **map,
|
||||||
unsigned long *map_start,
|
unsigned long *map_start,
|
||||||
unsigned long *map_len);
|
unsigned long *map_len);
|
||||||
int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
|
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
|
||||||
int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
|
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
|
||||||
int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
|
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
|
||||||
struct page *locked_page,
|
struct page *locked_page,
|
||||||
unsigned bits_to_clear,
|
unsigned bits_to_clear,
|
||||||
unsigned long page_ops);
|
unsigned long page_ops);
|
||||||
@ -357,7 +433,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
|
|||||||
int mirror_num);
|
int mirror_num);
|
||||||
int clean_io_failure(struct inode *inode, u64 start, struct page *page,
|
int clean_io_failure(struct inode *inode, u64 start, struct page *page,
|
||||||
unsigned int pg_offset);
|
unsigned int pg_offset);
|
||||||
int end_extent_writepage(struct page *page, int err, u64 start, u64 end);
|
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
|
||||||
int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
|
int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
|
||||||
int mirror_num);
|
int mirror_num);
|
||||||
|
|
||||||
|
@@ -202,7 +202,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
	}
 
	if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8)
-		path->reada = 2;
+		path->reada = READA_FORWARD;
 
	WARN_ON(bio->bi_vcnt <= 0);
 
@@ -328,7 +328,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 
	if (search_commit) {
		path->skip_locking = 1;
-		path->reada = 2;
+		path->reada = READA_FORWARD;
		path->search_commit_root = 1;
	}
 
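The path->reada conversions throughout this pull replace bare magic numbers (1, 2 and -1) with named directions from the "cleanup, use enum values for btrfs_path reada" commit; the values are declared along these lines (sketch; the actual declaration lives in ctree.h):

/* Readahead direction hints for btrfs_path::reada,
 * replacing the old bare 1 / 2 / -1 values. */
enum { READA_NONE = 0, READA_BACK, READA_FORWARD };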
@@ -1394,7 +1394,7 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
-				 start_pos, last_pos, 0, cached_state);
+				 start_pos, last_pos, cached_state);
		ordered = btrfs_lookup_ordered_range(inode, start_pos,
						     last_pos - start_pos + 1);
		if (ordered &&
@@ -2398,7 +2398,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
		truncate_pagecache_range(inode, lockstart, lockend);
 
		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-				 0, &cached_state);
+				 &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
 
		/*
@@ -2705,7 +2705,7 @@ static long btrfs_fallocate(struct file *file, int mode,
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
-				 locked_end, 0, &cached_state);
+				 locked_end, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
@@ -2852,7 +2852,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
	lockend--;
	len = lockend - lockstart + 1;
 
-	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
+	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			 &cached_state);
 
	while (start < inode->i_size) {
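All four hunks above are the same mechanical change: lock_extent_bits() loses its "bits" argument because every caller passed 0, leaving EXTENT_LOCKED as the only bit the helper ever sets. The resulting prototype, sketched (the definition lives in extent_io.c):

/* After the cleanup: no caller-supplied extra bits, EXTENT_LOCKED implied. */
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached_state);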
@@ -30,7 +30,7 @@
 #include "volumes.h"
 
 #define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
-#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
+#define MAX_CACHE_BYTES_PER_GIG	SZ_32K
 
 struct btrfs_trim_range {
	u64 start;
@@ -1086,14 +1086,11 @@ write_pinned_extent_entries(struct btrfs_root *root,
 static noinline_for_stack int
 write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list)
 {
-	struct list_head *pos, *n;
+	struct btrfs_free_space *entry, *next;
	int ret;
 
	/* Write out the bitmaps */
-	list_for_each_safe(pos, n, bitmap_list) {
-		struct btrfs_free_space *entry =
-			list_entry(pos, struct btrfs_free_space, list);
-
+	list_for_each_entry_safe(entry, next, bitmap_list, list) {
		ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
		if (ret)
			return -ENOSPC;
@@ -1119,13 +1116,10 @@ static int flush_dirty_cache(struct inode *inode)
 static void noinline_for_stack
 cleanup_bitmap_list(struct list_head *bitmap_list)
 {
-	struct list_head *pos, *n;
+	struct btrfs_free_space *entry, *next;
 
-	list_for_each_safe(pos, n, bitmap_list) {
-		struct btrfs_free_space *entry =
-			list_entry(pos, struct btrfs_free_space, list);
+	list_for_each_entry_safe(entry, next, bitmap_list, list)
		list_del_init(&entry->list);
-	}
 }
 
 static void noinline_for_stack
|
|||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
|
lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
|
||||||
0, &cached_state);
|
&cached_state);
|
||||||
|
|
||||||
io_ctl_set_generation(io_ctl, trans->transid);
|
io_ctl_set_generation(io_ctl, trans->transid);
|
||||||
|
|
||||||
@ -1656,11 +1650,10 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
|
|||||||
* at or below 32k, so we need to adjust how much memory we allow to be
|
* at or below 32k, so we need to adjust how much memory we allow to be
|
||||||
* used by extent based free space tracking
|
* used by extent based free space tracking
|
||||||
*/
|
*/
|
||||||
if (size < 1024 * 1024 * 1024)
|
if (size < SZ_1G)
|
||||||
max_bytes = MAX_CACHE_BYTES_PER_GIG;
|
max_bytes = MAX_CACHE_BYTES_PER_GIG;
|
||||||
else
|
else
|
||||||
max_bytes = MAX_CACHE_BYTES_PER_GIG *
|
max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G);
|
||||||
div_u64(size, 1024 * 1024 * 1024);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* we want to account for 1 more bitmap than what we have so we can make
|
* we want to account for 1 more bitmap than what we have so we can make
|
||||||
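The size conversions here come from the "use linux/sizes.h to represent constants" commit in this pull: SZ_* names replace hand-multiplied powers of two, which is both shorter and harder to mistype. For reference, the relevant values from include/linux/sizes.h:

/* from include/linux/sizes.h */
#define SZ_16K	0x00004000
#define SZ_32K	0x00008000
#define SZ_128K	0x00020000
#define SZ_1M	0x00100000
#define SZ_32M	0x02000000
#define SZ_1G	0x40000000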
@@ -2016,7 +2009,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
	return true;
 }
 
-static struct btrfs_free_space_op free_space_op = {
+static const struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
 };
@@ -2489,8 +2482,7 @@ void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
	 * track of free space, and if we pass 1/2 of that we want to
	 * start converting things over to using bitmaps
	 */
-	ctl->extents_thresh = ((1024 * 32) / 2) /
-				sizeof(struct btrfs_free_space);
+	ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space);
 }
 
 /*
@@ -37,7 +37,7 @@ struct btrfs_free_space_ctl {
	int total_bitmaps;
	int unit;
	u64 start;
-	struct btrfs_free_space_op *op;
+	const struct btrfs_free_space_op *op;
	void *private;
	struct mutex cache_writeout_mutex;
	struct list_head trimming_ranges;
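Constifying the ops tables ("constify remaining structs with function pointers" in this series) lets the linker place the callback pointers in read-only memory, so a stray write can no longer retarget them at runtime. The matching struct-member hunk just above is required because a const object can only be referenced through a const-qualified pointer. In miniature:

struct ops { void (*fn)(void); };

static void impl(void) { }
static const struct ops table = { .fn = impl };	/* ends up in .rodata */

/* the receiving pointer must be const-qualified as well: */
const struct ops *op = &table;	/* plain 'struct ops *op = &table;' won't compile */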
 fs/btrfs/free-space-tree.c | 1591 lines (new file; diff suppressed because it is too large)
 fs/btrfs/free-space-tree.h |   72 lines (new file)
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2015 Facebook.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __BTRFS_FREE_SPACE_TREE
+#define __BTRFS_FREE_SPACE_TREE
+
+/*
+ * The default size for new free space bitmap items. The last bitmap in a block
+ * group may be truncated, and none of the free space tree code assumes that
+ * existing bitmaps are this size.
+ */
+#define BTRFS_FREE_SPACE_BITMAP_SIZE 256
+#define BTRFS_FREE_SPACE_BITMAP_BITS (BTRFS_FREE_SPACE_BITMAP_SIZE * BITS_PER_BYTE)
+
+void set_free_space_tree_thresholds(struct btrfs_block_group_cache *block_group);
+int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info);
+int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info);
+int load_free_space_tree(struct btrfs_caching_control *caching_ctl);
+int add_block_group_free_space(struct btrfs_trans_handle *trans,
+			       struct btrfs_fs_info *fs_info,
+			       struct btrfs_block_group_cache *block_group);
+int remove_block_group_free_space(struct btrfs_trans_handle *trans,
+				  struct btrfs_fs_info *fs_info,
+				  struct btrfs_block_group_cache *block_group);
+int add_to_free_space_tree(struct btrfs_trans_handle *trans,
+			   struct btrfs_fs_info *fs_info,
+			   u64 start, u64 size);
+int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
+				struct btrfs_fs_info *fs_info,
+				u64 start, u64 size);
+
+/* Exposed for testing. */
+struct btrfs_free_space_info *
+search_free_space_info(struct btrfs_trans_handle *trans,
+		       struct btrfs_fs_info *fs_info,
+		       struct btrfs_block_group_cache *block_group,
+		       struct btrfs_path *path, int cow);
+int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
+			     struct btrfs_fs_info *fs_info,
+			     struct btrfs_block_group_cache *block_group,
+			     struct btrfs_path *path, u64 start, u64 size);
+int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
+				  struct btrfs_fs_info *fs_info,
+				  struct btrfs_block_group_cache *block_group,
+				  struct btrfs_path *path, u64 start, u64 size);
+int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
+				  struct btrfs_fs_info *fs_info,
+				  struct btrfs_block_group_cache *block_group,
+				  struct btrfs_path *path);
+int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
+				  struct btrfs_fs_info *fs_info,
+				  struct btrfs_block_group_cache *block_group,
+				  struct btrfs_path *path);
+int free_space_test_bit(struct btrfs_block_group_cache *block_group,
+			struct btrfs_path *path, u64 offset);
+
+#endif
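A back-of-envelope reading of the two constants above: each bitmap item payload is 256 bytes, i.e. 2048 bits, and with one bit per sector a full item describes 2048 sectors of a block group. The coverage arithmetic (inferred from the constants alone; the .c diff is suppressed above):

/* bytes of block group covered by one full free-space bitmap item */
static inline unsigned long long bitmap_item_coverage(unsigned int sectorsize)
{
	return 2048ULL * sectorsize;	/* BTRFS_FREE_SPACE_BITMAP_BITS */
}
/* at the common 4KiB sectorsize: 2048 * 4096 = 8 MiB per item */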
@@ -48,7 +48,7 @@ static int caching_kthread(void *data)
	/* Since the commit root is read-only, we can safely skip locking. */
	path->skip_locking = 1;
	path->search_commit_root = 1;
-	path->reada = 2;
+	path->reada = READA_FORWARD;
 
	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.offset = 0;
@@ -282,7 +282,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
	}
 }
 
-#define INIT_THRESHOLD	(((1024 * 32) / 2) / sizeof(struct btrfs_free_space))
+#define INIT_THRESHOLD	((SZ_32K / 2) / sizeof(struct btrfs_free_space))
 #define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8)
 
 /*
@@ -334,7 +334,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
	return true;
 }
 
-static struct btrfs_free_space_op free_ino_op = {
+static const struct btrfs_free_space_op free_ino_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
 };
@@ -356,7 +356,7 @@ static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl,
	return false;
 }
 
-static struct btrfs_free_space_op pinned_free_ino_op = {
+static const struct btrfs_free_space_op pinned_free_ino_op = {
	.recalc_thresholds	= pinned_recalc_thresholds,
	.use_bitmap		= pinned_use_bitmap,
 };
 fs/btrfs/inode.c | 310 changed lines
@@ -66,6 +66,13 @@ struct btrfs_iget_args {
	struct btrfs_root *root;
 };
 
+struct btrfs_dio_data {
+	u64 outstanding_extents;
+	u64 reserve;
+	u64 unsubmitted_oe_range_start;
+	u64 unsubmitted_oe_range_end;
+};
+
 static const struct inode_operations btrfs_dir_inode_operations;
 static const struct inode_operations btrfs_symlink_inode_operations;
 static const struct inode_operations btrfs_dir_ro_inode_operations;
@@ -74,17 +81,16 @@ static const struct inode_operations btrfs_file_inode_operations;
 static const struct address_space_operations btrfs_aops;
 static const struct address_space_operations btrfs_symlink_aops;
 static const struct file_operations btrfs_dir_file_operations;
-static struct extent_io_ops btrfs_extent_io_ops;
+static const struct extent_io_ops btrfs_extent_io_ops;
 
 static struct kmem_cache *btrfs_inode_cachep;
-static struct kmem_cache *btrfs_delalloc_work_cachep;
 struct kmem_cache *btrfs_trans_handle_cachep;
 struct kmem_cache *btrfs_transaction_cachep;
 struct kmem_cache *btrfs_path_cachep;
 struct kmem_cache *btrfs_free_space_cachep;
 
 #define S_SHIFT 12
-static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
+static const unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
@@ -414,15 +420,15 @@ static noinline void compress_file_range(struct inode *inode,
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
-	unsigned long max_compressed = 128 * 1024;
-	unsigned long max_uncompressed = 128 * 1024;
+	unsigned long max_compressed = SZ_128K;
+	unsigned long max_uncompressed = SZ_128K;
	int i;
	int will_compress;
	int compress_type = root->fs_info->compress_type;
	int redirty = 0;
 
	/* if this is a small write inside eof, kick off a defrag */
-	if ((end - start + 1) < 16 * 1024 &&
+	if ((end - start + 1) < SZ_16K &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);
 
@@ -430,7 +436,7 @@ static noinline void compress_file_range(struct inode *inode,
 again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
-	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
+	nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_CACHE_SIZE);
 
	/*
	 * we don't want to send crud past the end of i_size through
@@ -944,7 +950,7 @@ static noinline int cow_file_range(struct inode *inode,
	disk_num_bytes = num_bytes;
 
	/* if this is a small write inside eof, kick off defrag */
-	if (num_bytes < 64 * 1024 &&
+	if (num_bytes < SZ_64K &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);
 
@@ -1107,7 +1113,7 @@ static noinline void async_cow_submit(struct btrfs_work *work)
	 * atomic_sub_return implies a barrier for waitqueue_active
	 */
	if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
-	    5 * 1024 * 1024 &&
+	    5 * SZ_1M &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);
 
@@ -1132,7 +1138,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
-	int limit = 10 * 1024 * 1024;
+	int limit = 10 * SZ_1M;
 
	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
@@ -1148,7 +1154,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
	    !btrfs_test_opt(root, FORCE_COMPRESS))
		cur_end = end;
	else
-		cur_end = min(end, start + 512 * 1024 - 1);
+		cur_end = min(end, start + SZ_512K - 1);
 
	async_cow->end = cur_end;
	INIT_LIST_HEAD(&async_cow->extents);
@@ -1989,7 +1995,7 @@ again:
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
 
-	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
+	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
			 &cached_state);
 
	/* already ordered? We're done */
@@ -2482,7 +2488,7 @@ static noinline int relink_extent_backref(struct btrfs_path *path,
	lock_start = backref->file_pos;
	lock_end = backref->file_pos + backref->num_bytes - 1;
	lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
-			 0, &cached);
+			 &cached);
 
	ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
	if (ordered) {
@@ -2874,7 +2880,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 
	lock_extent_bits(io_tree, ordered_extent->file_offset,
			 ordered_extent->file_offset + ordered_extent->len - 1,
-			 0, &cached_state);
+			 &cached_state);
 
	ret = test_range_bit(io_tree, ordered_extent->file_offset,
			     ordered_extent->file_offset + ordered_extent->len - 1,
@@ -3106,55 +3112,47 @@ static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
				      start, (size_t)(end - start + 1));
 }
 
-struct delayed_iput {
-	struct list_head list;
-	struct inode *inode;
-};
-
-/* JDM: If this is fs-wide, why can't we add a pointer to
- * btrfs_inode instead and avoid the allocation? */
 void btrfs_add_delayed_iput(struct inode *inode)
 {
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
-	struct delayed_iput *delayed;
+	struct btrfs_inode *binode = BTRFS_I(inode);
 
	if (atomic_add_unless(&inode->i_count, -1, 1))
		return;
 
-	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
-	delayed->inode = inode;
-
	spin_lock(&fs_info->delayed_iput_lock);
-	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
+	if (binode->delayed_iput_count == 0) {
+		ASSERT(list_empty(&binode->delayed_iput));
+		list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
+	} else {
+		binode->delayed_iput_count++;
+	}
	spin_unlock(&fs_info->delayed_iput_lock);
 }
 
 void btrfs_run_delayed_iputs(struct btrfs_root *root)
 {
-	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;
-	struct delayed_iput *delayed;
-	int empty;
-
-	spin_lock(&fs_info->delayed_iput_lock);
-	empty = list_empty(&fs_info->delayed_iputs);
-	spin_unlock(&fs_info->delayed_iput_lock);
-	if (empty)
-		return;
 
	down_read(&fs_info->delayed_iput_sem);
 
	spin_lock(&fs_info->delayed_iput_lock);
-	list_splice_init(&fs_info->delayed_iputs, &list);
-	spin_unlock(&fs_info->delayed_iput_lock);
+	while (!list_empty(&fs_info->delayed_iputs)) {
+		struct btrfs_inode *inode;
 
-	while (!list_empty(&list)) {
-		delayed = list_entry(list.next, struct delayed_iput, list);
-		list_del(&delayed->list);
-		iput(delayed->inode);
-		kfree(delayed);
+		inode = list_first_entry(&fs_info->delayed_iputs,
				struct btrfs_inode, delayed_iput);
+		if (inode->delayed_iput_count) {
+			inode->delayed_iput_count--;
+			list_move_tail(&inode->delayed_iput,
					&fs_info->delayed_iputs);
+		} else {
+			list_del_init(&inode->delayed_iput);
+		}
+		spin_unlock(&fs_info->delayed_iput_lock);
+		iput(&inode->vfs_inode);
+		spin_lock(&fs_info->delayed_iput_lock);
	}
+	spin_unlock(&fs_info->delayed_iput_lock);
	up_read(&root->fs_info->delayed_iput_sem);
 }
 
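This rework answers the old JDM comment in place: instead of kmalloc'ing a tracking node per deferred iput (with __GFP_NOFAIL, no less), each btrfs_inode now embeds a list_head plus a counter of additional pending iputs. Note the lock choreography in the new loop — the spinlock is dropped around iput(), which can sleep. That pop-and-process shape, distilled with a hypothetical item type:

/* Drain a spinlock-protected queue when processing may sleep. */
spin_lock(&lock);
while (!list_empty(&queue)) {
	struct item *it = list_first_entry(&queue, struct item, link);

	list_del_init(&it->link);
	spin_unlock(&lock);	/* never call a sleeping function under a spinlock */
	process(it);		/* e.g. iput(), which may block */
	spin_lock(&lock);
}
spin_unlock(&lock);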
@@ -3351,7 +3349,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
		ret = -ENOMEM;
		goto out;
	}
-	path->reada = -1;
+	path->reada = READA_BACK;
 
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
@@ -4318,7 +4316,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
-	path->reada = -1;
+	path->reada = READA_BACK;
 
	/*
	 * We want to drop from the next block forward in case this new size is
@@ -4349,7 +4347,7 @@ search_again:
	 * up a huge file in a single leaf.  Most of the time that
	 * bytes_deleted is > 0, it will be huge by the time we get here
	 */
-	if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
+	if (be_nice && bytes_deleted > SZ_32M) {
		if (btrfs_should_end_transaction(trans, root)) {
			err = -EAGAIN;
			goto error;
@@ -4592,7 +4590,7 @@ error:
 
	btrfs_free_path(path);
 
-	if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
+	if (be_nice && bytes_deleted > SZ_32M) {
		unsigned long updates = trans->delayed_ref_updates;
		if (updates) {
			trans->delayed_ref_updates = 0;
@@ -4669,7 +4667,7 @@ again:
	}
	wait_on_page_writeback(page);
 
-	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
+	lock_extent_bits(io_tree, page_start, page_end, &cached_state);
	set_page_extent_mapped(page);
 
	ordered = btrfs_lookup_ordered_extent(inode, page_start);
@@ -4800,7 +4798,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
	while (1) {
		struct btrfs_ordered_extent *ordered;
 
-		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
+		lock_extent_bits(io_tree, hole_start, block_end - 1,
				 &cached_state);
		ordered = btrfs_lookup_ordered_range(inode, hole_start,
						     block_end - hole_start);
@@ -5112,7 +5110,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
		end = state->end;
		spin_unlock(&io_tree->lock);
 
-		lock_extent_bits(io_tree, start, end, 0, &cached_state);
+		lock_extent_bits(io_tree, start, end, &cached_state);
 
		/*
		 * If still has DELALLOC flag, the extent didn't reach disk,
@@ -5305,7 +5303,6 @@ void btrfs_evict_inode(struct inode *inode)
 no_delete:
	btrfs_remove_delayed_node(inode);
	clear_inode(inode);
-	return;
 }
 
 /*
@@ -5754,7 +5751,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
	if (!path)
		return -ENOMEM;
 
-	path->reada = 1;
+	path->reada = READA_FORWARD;
 
	if (key_type == BTRFS_DIR_INDEX_KEY) {
		INIT_LIST_HEAD(&ins_list);
@@ -6482,7 +6479,7 @@ out_unlock_inode:
 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
 {
-	struct btrfs_trans_handle *trans;
+	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = d_inode(old_dentry);
	u64 index;
@@ -6508,6 +6505,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
+		trans = NULL;
		goto fail;
	}
 
@@ -6541,9 +6539,10 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
		btrfs_log_new_name(trans, inode, NULL, parent);
	}
 
-	btrfs_end_transaction(trans, root);
	btrfs_balance_delayed_items(root);
 fail:
+	if (trans)
+		btrfs_end_transaction(trans, root);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
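The btrfs_link() fix is the classic initialize-to-NULL idiom: with trans zeroed up front and reset to NULL when btrfs_start_transaction() fails, the single fail: label can release the handle unconditionally. The shape, reduced to a sketch with hypothetical names:

struct handle *h = NULL;
int err = 0;

h = start();
if (IS_ERR(h)) {
	err = PTR_ERR(h);
	h = NULL;	/* don't hand an ERR_PTR to the cleanup below */
	goto fail;
}
/* ... work ... */
fail:
	if (h)
		finish(h);
	return err;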
@@ -6688,7 +6687,7 @@ static int merge_extent_mapping(struct extent_map_tree *em_tree,
 }
 
 static noinline int uncompress_inline(struct btrfs_path *path,
-				      struct inode *inode, struct page *page,
+				      struct page *page,
				      size_t pg_offset, u64 extent_offset,
				      struct btrfs_file_extent_item *item)
 {
@@ -6785,7 +6784,7 @@ again:
		 * Chances are we'll be called again, so go ahead and do
		 * readahead
		 */
-		path->reada = 1;
+		path->reada = READA_FORWARD;
	}
 
	ret = btrfs_lookup_file_extent(trans, root, path,
@@ -6884,8 +6883,7 @@ next:
	if (create == 0 && !PageUptodate(page)) {
		if (btrfs_file_extent_compression(leaf, item) !=
		    BTRFS_COMPRESS_NONE) {
-			ret = uncompress_inline(path, inode, page,
-						pg_offset,
+			ret = uncompress_inline(path, page, pg_offset,
						extent_offset, item);
			if (ret) {
				err = ret;
@@ -7381,7 +7379,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
 
	while (1) {
		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-				 0, cached_state);
+				 cached_state);
		/*
		 * We're concerned with the entire range that we're going to be
		 * doing DIO to, so we need to make sure theres no ordered
@@ -7409,25 +7407,21 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
		} else {
-			/* Screw you mmap */
-			ret = btrfs_fdatawrite_range(inode, lockstart, lockend);
-			if (ret)
-				break;
-			ret = filemap_fdatawait_range(inode->i_mapping,
-						      lockstart,
-						      lockend);
-			if (ret)
-				break;
-
			/*
-			 * If we found a page that couldn't be invalidated just
-			 * fall back to buffered.
+			 * We could trigger writeback for this range (and wait
+			 * for it to complete) and then invalidate the pages for
+			 * this range (through invalidate_inode_pages2_range()),
+			 * but that can lead us to a deadlock with a concurrent
+			 * call to readpages() (a buffered read or a defrag call
+			 * triggered a readahead) on a page lock due to an
+			 * ordered dio extent we created before but did not have
+			 * yet a corresponding bio submitted (whence it can not
+			 * complete), which makes readpages() wait for that
+			 * ordered extent to complete while holding a lock on
+			 * that page.
			 */
-			ret = invalidate_inode_pages2_range(inode->i_mapping,
-					lockstart >> PAGE_CACHE_SHIFT,
-					lockend >> PAGE_CACHE_SHIFT);
-			if (ret)
-				break;
+			ret = -ENOTBLK;
+			break;
		}
 
		cond_resched();
@@ -7483,11 +7477,6 @@ static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
	return em;
 }
 
-struct btrfs_dio_data {
-	u64 outstanding_extents;
-	u64 reserve;
-};
-
 static void adjust_dio_outstanding_extents(struct inode *inode,
					    struct btrfs_dio_data *dio_data,
					    const u64 len)
|
|||||||
btrfs_free_reserved_data_space(inode, start, len);
|
btrfs_free_reserved_data_space(inode, start, len);
|
||||||
WARN_ON(dio_data->reserve < len);
|
WARN_ON(dio_data->reserve < len);
|
||||||
dio_data->reserve -= len;
|
dio_data->reserve -= len;
|
||||||
|
dio_data->unsubmitted_oe_range_end = start + len;
|
||||||
current->journal_info = dio_data;
|
current->journal_info = dio_data;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -7993,22 +7983,22 @@ static void btrfs_endio_direct_read(struct bio *bio)
|
|||||||
bio_put(bio);
|
bio_put(bio);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void btrfs_endio_direct_write(struct bio *bio)
|
static void btrfs_endio_direct_write_update_ordered(struct inode *inode,
|
||||||
|
const u64 offset,
|
||||||
|
const u64 bytes,
|
||||||
|
const int uptodate)
|
||||||
{
|
{
|
||||||
struct btrfs_dio_private *dip = bio->bi_private;
|
|
||||||
struct inode *inode = dip->inode;
|
|
||||||
struct btrfs_root *root = BTRFS_I(inode)->root;
|
struct btrfs_root *root = BTRFS_I(inode)->root;
|
||||||
struct btrfs_ordered_extent *ordered = NULL;
|
struct btrfs_ordered_extent *ordered = NULL;
|
||||||
u64 ordered_offset = dip->logical_offset;
|
u64 ordered_offset = offset;
|
||||||
u64 ordered_bytes = dip->bytes;
|
u64 ordered_bytes = bytes;
|
||||||
struct bio *dio_bio;
|
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
again:
|
again:
|
||||||
ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
|
ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
|
||||||
&ordered_offset,
|
&ordered_offset,
|
||||||
ordered_bytes,
|
ordered_bytes,
|
||||||
!bio->bi_error);
|
uptodate);
|
||||||
if (!ret)
|
if (!ret)
|
||||||
goto out_test;
|
goto out_test;
|
||||||
|
|
||||||
@ -8021,13 +8011,22 @@ out_test:
|
|||||||
* our bio might span multiple ordered extents. If we haven't
|
* our bio might span multiple ordered extents. If we haven't
|
||||||
* completed the accounting for the whole dio, go back and try again
|
* completed the accounting for the whole dio, go back and try again
|
||||||
*/
|
*/
|
||||||
if (ordered_offset < dip->logical_offset + dip->bytes) {
|
if (ordered_offset < offset + bytes) {
|
||||||
ordered_bytes = dip->logical_offset + dip->bytes -
|
ordered_bytes = offset + bytes - ordered_offset;
|
||||||
ordered_offset;
|
|
||||||
ordered = NULL;
|
ordered = NULL;
|
||||||
goto again;
|
goto again;
|
||||||
}
|
}
|
||||||
dio_bio = dip->dio_bio;
|
}
|
||||||
|
|
||||||
|
static void btrfs_endio_direct_write(struct bio *bio)
|
||||||
|
{
|
||||||
|
struct btrfs_dio_private *dip = bio->bi_private;
|
||||||
|
struct bio *dio_bio = dip->dio_bio;
|
||||||
|
|
||||||
|
btrfs_endio_direct_write_update_ordered(dip->inode,
|
||||||
|
dip->logical_offset,
|
||||||
|
dip->bytes,
|
||||||
|
!bio->bi_error);
|
||||||
|
|
||||||
kfree(dip);
|
kfree(dip);
|
||||||
|
|
||||||
@ -8335,6 +8334,21 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
|
|||||||
dip->subio_endio = btrfs_subio_endio_read;
|
dip->subio_endio = btrfs_subio_endio_read;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Reset the range for unsubmitted ordered extents (to a 0 length range)
|
||||||
|
* even if we fail to submit a bio, because in such case we do the
|
||||||
|
* corresponding error handling below and it must not be done a second
|
||||||
|
* time by btrfs_direct_IO().
|
||||||
|
*/
|
||||||
|
if (write) {
|
||||||
|
struct btrfs_dio_data *dio_data = current->journal_info;
|
||||||
|
|
||||||
|
dio_data->unsubmitted_oe_range_end = dip->logical_offset +
|
||||||
|
dip->bytes;
|
||||||
|
dio_data->unsubmitted_oe_range_start =
|
||||||
|
dio_data->unsubmitted_oe_range_end;
|
||||||
|
}
|
||||||
|
|
||||||
ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
|
ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
|
||||||
if (!ret)
|
if (!ret)
|
||||||
return;
|
return;
|
||||||
@@ -8363,24 +8377,15 @@ free_ordered:
		dip = NULL;
		io_bio = NULL;
	} else {
-		if (write) {
-			struct btrfs_ordered_extent *ordered;
-
-			ordered = btrfs_lookup_ordered_extent(inode,
-							      file_offset);
-			set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
-			/*
-			 * Decrements our ref on the ordered extent and removes
-			 * the ordered extent from the inode's ordered tree,
-			 * doing all the proper resource cleanup such as for the
-			 * reserved space and waking up any waiters for this
-			 * ordered extent (through btrfs_remove_ordered_extent).
-			 */
-			btrfs_finish_ordered_io(ordered);
-		} else {
+		if (write)
+			btrfs_endio_direct_write_update_ordered(inode,
+						file_offset,
+						dio_bio->bi_iter.bi_size,
+						0);
+		else
			unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
				      file_offset + dio_bio->bi_iter.bi_size - 1);
-		}
+
		dio_bio->bi_error = -EIO;
		/*
		 * Releases and cleans up our dio_bio, no need to bio_put()
@@ -8480,6 +8485,8 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		 * originally calculated.  Abuse current->journal_info for this.
		 */
		dio_data.reserve = round_up(count, root->sectorsize);
+		dio_data.unsubmitted_oe_range_start = (u64)offset;
+		dio_data.unsubmitted_oe_range_end = (u64)offset;
		current->journal_info = &dio_data;
	} else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
			    &BTRFS_I(inode)->runtime_flags)) {
@@ -8498,6 +8505,19 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		if (dio_data.reserve)
			btrfs_delalloc_release_space(inode, offset,
						     dio_data.reserve);
+		/*
+		 * On error we might have left some ordered extents
+		 * without submitting corresponding bios for them, so
+		 * cleanup them up to avoid other tasks getting them
+		 * and waiting for them to complete forever.
+		 */
+		if (dio_data.unsubmitted_oe_range_start <
+		    dio_data.unsubmitted_oe_range_end)
+			btrfs_endio_direct_write_update_ordered(inode,
+				dio_data.unsubmitted_oe_range_start,
+				dio_data.unsubmitted_oe_range_end -
+					dio_data.unsubmitted_oe_range_start,
+				0);
	} else if (ret >= 0 && (size_t)ret < count)
		btrfs_delalloc_release_space(inode, offset,
					     count - (size_t)ret);
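The new unsubmitted_oe_range_{start,end} fields form a half-open interval of ordered extents that were created by the get_blocks callback but never got a bio. Starting with start == end (empty) and re-emptying the range on every successful submission makes "nothing to clean up" the default state. The invariant, in miniature (cleanup_range() is a hypothetical stand-in for btrfs_endio_direct_write_update_ordered(..., 0)):

u64 start, end;

start = end = (u64)offset;	/* empty range: nothing pending */
end = offset + len;		/* an ordered extent was created, no bio yet */
/* after a successful submission: start = end, i.e. empty again */

if (start < end)		/* reached only on an error path */
	cleanup_range(start, end - start);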
@@ -8535,15 +8555,28 @@ int btrfs_readpage(struct file *file, struct page *page)
 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
 {
	struct extent_io_tree *tree;
+	struct inode *inode = page->mapping->host;
+	int ret;
 
	if (current->flags & PF_MEMALLOC) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}
+
+	/*
+	 * If we are under memory pressure we will call this directly from the
+	 * VM, we need to make sure we have the inode referenced for the ordered
+	 * extent.  If not just return like we didn't do anything.
+	 */
+	if (!igrab(inode)) {
+		redirty_page_for_writepage(wbc, page);
+		return AOP_WRITEPAGE_ACTIVATE;
+	}
	tree = &BTRFS_I(page->mapping->host)->io_tree;
-	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
+	ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
+	btrfs_add_delayed_iput(inode);
+	return ret;
 }
 
 static int btrfs_writepages(struct address_space *mapping,
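Two details worth noting in the writepage change above: igrab() returns NULL once an inode is already being torn down (I_FREEING/I_WILL_FREE), so under reclaim the page is simply redirtied and handed back to the VM rather than touching a dying inode; and the matching reference drop goes through the new delayed-iput machinery because a plain iput() from writeback context could recurse into inode eviction and block.

/* Reference discipline used above: */
if (!igrab(inode))			/* NULL if the inode is being freed */
	return AOP_WRITEPAGE_ACTIVATE;	/* VM will retry this page later */
/* ... do the write ... */
btrfs_add_delayed_iput(inode);		/* deferred iput(); safe in writeback */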
@@ -8615,7 +8648,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
	}
 
	if (!inode_evicting)
-		lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
+		lock_extent_bits(tree, page_start, page_end, &cached_state);
	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		/*
@@ -8653,7 +8686,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
		btrfs_put_ordered_extent(ordered);
		if (!inode_evicting) {
			cached_state = NULL;
-			lock_extent_bits(tree, page_start, page_end, 0,
+			lock_extent_bits(tree, page_start, page_end,
					 &cached_state);
		}
	}
@@ -8751,7 +8784,7 @@ again:
	}
	wait_on_page_writeback(page);
 
-	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
+	lock_extent_bits(io_tree, page_start, page_end, &cached_state);
	set_page_extent_mapped(page);
 
	/*
@@ -9025,6 +9058,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
	ei->dir_index = 0;
	ei->last_unlink_trans = 0;
	ei->last_log_commit = 0;
+	ei->delayed_iput_count = 0;
 
	spin_lock_init(&ei->lock);
	ei->outstanding_extents = 0;
@@ -9049,6 +9083,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
	mutex_init(&ei->delalloc_mutex);
	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
	INIT_LIST_HEAD(&ei->delalloc_inodes);
+	INIT_LIST_HEAD(&ei->delayed_iput);
	RB_CLEAR_NODE(&ei->rb_node);
 
	return inode;
@@ -9153,8 +9188,6 @@ void btrfs_destroy_cachep(void)
		kmem_cache_destroy(btrfs_path_cachep);
	if (btrfs_free_space_cachep)
		kmem_cache_destroy(btrfs_free_space_cachep);
-	if (btrfs_delalloc_work_cachep)
-		kmem_cache_destroy(btrfs_delalloc_work_cachep);
 }
 
 int btrfs_init_cachep(void)
@@ -9190,13 +9223,6 @@ int btrfs_init_cachep(void)
	if (!btrfs_free_space_cachep)
		goto fail;
 
-	btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work",
-			sizeof(struct btrfs_delalloc_work), 0,
-			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
-			NULL);
-	if (!btrfs_delalloc_work_cachep)
-		goto fail;
-
	return 0;
 fail:
	btrfs_destroy_cachep();
@@ -9420,14 +9446,10 @@ static void btrfs_run_delalloc_work(struct btrfs_work *work)
	delalloc_work = container_of(work, struct btrfs_delalloc_work,
				     work);
	inode = delalloc_work->inode;
-	if (delalloc_work->wait) {
-		btrfs_wait_ordered_range(inode, 0, (u64)-1);
-	} else {
+	filemap_flush(inode->i_mapping);
+	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+		     &BTRFS_I(inode)->runtime_flags))
		filemap_flush(inode->i_mapping);
-		if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
-			     &BTRFS_I(inode)->runtime_flags))
-			filemap_flush(inode->i_mapping);
-	}
 
	if (delalloc_work->delay_iput)
		btrfs_add_delayed_iput(inode);
@@ -9437,18 +9459,17 @@ static void btrfs_run_delalloc_work(struct btrfs_work *work)
 }
 
 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
-						      int wait, int delay_iput)
+						      int delay_iput)
 {
	struct btrfs_delalloc_work *work;
 
-	work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS);
+	work = kmalloc(sizeof(*work), GFP_NOFS);
	if (!work)
		return NULL;
 
	init_completion(&work->completion);
	INIT_LIST_HEAD(&work->list);
	work->inode = inode;
-	work->wait = wait;
	work->delay_iput = delay_iput;
	WARN_ON_ONCE(!inode);
	btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
@@ -9460,7 +9481,7 @@ struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
 {
	wait_for_completion(&work->completion);
-	kmem_cache_free(btrfs_delalloc_work_cachep, work);
+	kfree(work);
 }
 
 /*
|
|||||||
}
|
}
|
||||||
spin_unlock(&root->delalloc_lock);
|
spin_unlock(&root->delalloc_lock);
|
||||||
|
|
||||||
work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
|
work = btrfs_alloc_delalloc_work(inode, delay_iput);
|
||||||
if (!work) {
|
if (!work) {
|
||||||
if (delay_iput)
|
if (delay_iput)
|
||||||
btrfs_add_delayed_iput(inode);
|
btrfs_add_delayed_iput(inode);
|
||||||
@ -9638,9 +9659,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
|
|||||||
/*
|
/*
|
||||||
* 2 items for inode item and ref
|
* 2 items for inode item and ref
|
||||||
* 2 items for dir items
|
* 2 items for dir items
|
||||||
|
* 1 item for updating parent inode item
|
||||||
|
* 1 item for the inline extent item
|
||||||
* 1 item for xattr if selinux is on
|
* 1 item for xattr if selinux is on
|
||||||
*/
|
*/
|
||||||
trans = btrfs_start_transaction(root, 5);
|
trans = btrfs_start_transaction(root, 7);
|
||||||
if (IS_ERR(trans))
|
if (IS_ERR(trans))
|
||||||
return PTR_ERR(trans);
|
return PTR_ERR(trans);
|
||||||
|
|
||||||
@ -9671,10 +9694,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
|
|||||||
if (err)
|
if (err)
|
||||||
goto out_unlock_inode;
|
goto out_unlock_inode;
|
||||||
|
|
||||||
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
|
|
||||||
if (err)
|
|
||||||
goto out_unlock_inode;
|
|
||||||
|
|
||||||
path = btrfs_alloc_path();
|
path = btrfs_alloc_path();
|
||||||
if (!path) {
|
if (!path) {
|
||||||
err = -ENOMEM;
|
err = -ENOMEM;
|
||||||
@ -9712,6 +9731,13 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
|
|||||||
inode_set_bytes(inode, name_len);
|
inode_set_bytes(inode, name_len);
|
||||||
btrfs_i_size_write(inode, name_len);
|
btrfs_i_size_write(inode, name_len);
|
||||||
err = btrfs_update_inode(trans, root, inode);
|
err = btrfs_update_inode(trans, root, inode);
|
||||||
|
/*
|
||||||
|
* Last step, add directory indexes for our symlink inode. This is the
|
||||||
|
* last step to avoid extra cleanup of these indexes if an error happens
|
||||||
|
* elsewhere above.
|
||||||
|
*/
|
||||||
|
if (!err)
|
||||||
|
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
|
||||||
if (err) {
|
if (err) {
|
||||||
drop_inode = 1;
|
drop_inode = 1;
|
||||||
goto out_unlock_inode;
|
goto out_unlock_inode;
|
||||||
@ -9762,7 +9788,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
|
cur_bytes = min_t(u64, num_bytes, SZ_256M);
|
||||||
cur_bytes = max(cur_bytes, min_size);
|
cur_bytes = max(cur_bytes, min_size);
|
||||||
/*
|
/*
|
||||||
* If we are severely fragmented we could end up with really
|
* If we are severely fragmented we could end up with really
|
||||||
@ -10026,7 +10052,7 @@ static const struct file_operations btrfs_dir_file_operations = {
|
|||||||
.fsync = btrfs_sync_file,
|
.fsync = btrfs_sync_file,
|
||||||
};
|
};
|
||||||
|
|
||||||
static struct extent_io_ops btrfs_extent_io_ops = {
|
static const struct extent_io_ops btrfs_extent_io_ops = {
|
||||||
.fill_delalloc = run_delalloc_range,
|
.fill_delalloc = run_delalloc_range,
|
||||||
.submit_bio_hook = btrfs_submit_bio_hook,
|
.submit_bio_hook = btrfs_submit_bio_hook,
|
||||||
.merge_bio_hook = btrfs_merge_bio_hook,
|
.merge_bio_hook = btrfs_merge_bio_hook,
|
||||||
|
@@ -655,22 +655,28 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return -EINVAL;
 
+	pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
+	if (!pending_snapshot)
+		return -ENOMEM;
+
+	pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item),
+			GFP_NOFS);
+	pending_snapshot->path = btrfs_alloc_path();
+	if (!pending_snapshot->root_item || !pending_snapshot->path) {
+		ret = -ENOMEM;
+		goto free_pending;
+	}
+
	atomic_inc(&root->will_be_snapshoted);
	smp_mb__after_atomic();
	btrfs_wait_for_no_snapshoting_writes(root);
 
	ret = btrfs_start_delalloc_inodes(root, 0);
	if (ret)
-		goto out;
+		goto dec_and_free;
 
	btrfs_wait_ordered_extents(root, -1);
 
-	pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
-	if (!pending_snapshot) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
	btrfs_init_block_rsv(&pending_snapshot->block_rsv,
			     BTRFS_BLOCK_RSV_TEMP);
	/*
@@ -686,7 +692,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
			&pending_snapshot->qgroup_reserved,
			false);
	if (ret)
-		goto free;
+		goto dec_and_free;
 
	pending_snapshot->dentry = dentry;
	pending_snapshot->root = root;
@@ -737,11 +743,14 @@ fail:
	btrfs_subvolume_release_metadata(BTRFS_I(dir)->root,
					 &pending_snapshot->block_rsv,
					 pending_snapshot->qgroup_reserved);
-free:
-	kfree(pending_snapshot);
-out:
+dec_and_free:
	if (atomic_dec_and_test(&root->will_be_snapshoted))
		wake_up_atomic_t(&root->will_be_snapshoted);
+free_pending:
+	kfree(pending_snapshot->root_item);
+	btrfs_free_path(pending_snapshot->path);
+	kfree(pending_snapshot);
+
	return ret;
 }
 
@ -992,7 +1001,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
|
|||||||
u64 end = start + len - 1;
|
u64 end = start + len - 1;
|
||||||
|
|
||||||
/* get the big lock and read metadata off disk */
|
/* get the big lock and read metadata off disk */
|
||||||
lock_extent_bits(io_tree, start, end, 0, &cached);
|
lock_extent_bits(io_tree, start, end, &cached);
|
||||||
em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
|
em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
|
||||||
unlock_extent_cached(io_tree, start, end, &cached, GFP_NOFS);
|
unlock_extent_cached(io_tree, start, end, &cached, GFP_NOFS);
|
||||||
|
|
||||||
@ -1016,7 +1025,7 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
|
|||||||
if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
|
if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
|
||||||
ret = false;
|
ret = false;
|
||||||
else if ((em->block_start + em->block_len == next->block_start) &&
|
else if ((em->block_start + em->block_len == next->block_start) &&
|
||||||
(em->block_len > 128 * 1024 && next->block_len > 128 * 1024))
|
(em->block_len > SZ_128K && next->block_len > SZ_128K))
|
||||||
ret = false;
|
ret = false;
|
||||||
|
|
||||||
free_extent_map(next);
|
free_extent_map(next);
|
||||||
@ -1140,7 +1149,7 @@ again:
|
|||||||
page_end = page_start + PAGE_CACHE_SIZE - 1;
|
page_end = page_start + PAGE_CACHE_SIZE - 1;
|
||||||
while (1) {
|
while (1) {
|
||||||
lock_extent_bits(tree, page_start, page_end,
|
lock_extent_bits(tree, page_start, page_end,
|
||||||
0, &cached_state);
|
&cached_state);
|
||||||
ordered = btrfs_lookup_ordered_extent(inode,
|
ordered = btrfs_lookup_ordered_extent(inode,
|
||||||
page_start);
|
page_start);
|
||||||
unlock_extent_cached(tree, page_start, page_end,
|
unlock_extent_cached(tree, page_start, page_end,
|
||||||
@ -1200,7 +1209,7 @@ again:
|
|||||||
page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE;
|
page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE;
|
||||||
|
|
||||||
lock_extent_bits(&BTRFS_I(inode)->io_tree,
|
lock_extent_bits(&BTRFS_I(inode)->io_tree,
|
||||||
page_start, page_end - 1, 0, &cached_state);
|
page_start, page_end - 1, &cached_state);
|
||||||
clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
|
clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
|
||||||
page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
|
page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
|
||||||
EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
|
EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
|
||||||
@ -1262,9 +1271,9 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
|
|||||||
int defrag_count = 0;
|
int defrag_count = 0;
|
||||||
int compress_type = BTRFS_COMPRESS_ZLIB;
|
int compress_type = BTRFS_COMPRESS_ZLIB;
|
||||||
u32 extent_thresh = range->extent_thresh;
|
u32 extent_thresh = range->extent_thresh;
|
||||||
unsigned long max_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT;
|
unsigned long max_cluster = SZ_256K >> PAGE_CACHE_SHIFT;
|
||||||
unsigned long cluster = max_cluster;
|
unsigned long cluster = max_cluster;
|
||||||
u64 new_align = ~((u64)128 * 1024 - 1);
|
u64 new_align = ~((u64)SZ_128K - 1);
|
||||||
struct page **pages = NULL;
|
struct page **pages = NULL;
|
||||||
|
|
||||||
if (isize == 0)
|
if (isize == 0)
|
||||||
@ -1281,7 +1290,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (extent_thresh == 0)
|
if (extent_thresh == 0)
|
||||||
extent_thresh = 256 * 1024;
|
extent_thresh = SZ_256K;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* if we were not given a file, allocate a readahead
|
* if we were not given a file, allocate a readahead
|
||||||
@ -1313,7 +1322,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
|
|||||||
|
|
||||||
if (newer_than) {
|
if (newer_than) {
|
||||||
ret = find_new_extents(root, inode, newer_than,
|
ret = find_new_extents(root, inode, newer_than,
|
||||||
&newer_off, 64 * 1024);
|
&newer_off, SZ_64K);
|
||||||
if (!ret) {
|
if (!ret) {
|
||||||
range->start = newer_off;
|
range->start = newer_off;
|
||||||
/*
|
/*
|
||||||
@ -1403,9 +1412,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
|
|||||||
newer_off = max(newer_off + 1,
|
newer_off = max(newer_off + 1,
|
||||||
(u64)i << PAGE_CACHE_SHIFT);
|
(u64)i << PAGE_CACHE_SHIFT);
|
||||||
|
|
||||||
ret = find_new_extents(root, inode,
|
ret = find_new_extents(root, inode, newer_than,
|
||||||
newer_than, &newer_off,
|
&newer_off, SZ_64K);
|
||||||
64 * 1024);
|
|
||||||
if (!ret) {
|
if (!ret) {
|
||||||
range->start = newer_off;
|
range->start = newer_off;
|
||||||
i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
|
i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
|
||||||
@ -1571,7 +1579,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
|
|||||||
new_size = old_size + new_size;
|
new_size = old_size + new_size;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (new_size < 256 * 1024 * 1024) {
|
if (new_size < SZ_256M) {
|
||||||
ret = -EINVAL;
|
ret = -EINVAL;
|
||||||
goto out_free;
|
goto out_free;
|
||||||
}
|
}
|
||||||
@ -2160,7 +2168,7 @@ static noinline int btrfs_ioctl_tree_search_v2(struct file *file,
|
|||||||
struct inode *inode;
|
struct inode *inode;
|
||||||
int ret;
|
int ret;
|
||||||
size_t buf_size;
|
size_t buf_size;
|
||||||
const size_t buf_limit = 16 * 1024 * 1024;
|
const size_t buf_limit = SZ_16M;
|
||||||
|
|
||||||
if (!capable(CAP_SYS_ADMIN))
|
if (!capable(CAP_SYS_ADMIN))
|
||||||
return -EPERM;
|
return -EPERM;
|
||||||
@ -3096,7 +3104,7 @@ out_unlock:
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
#define BTRFS_MAX_DEDUPE_LEN (16 * 1024 * 1024)
|
#define BTRFS_MAX_DEDUPE_LEN SZ_16M
|
||||||
|
|
||||||
ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen,
|
ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen,
|
||||||
struct file *dst_file, u64 dst_loff)
|
struct file *dst_file, u64 dst_loff)
|
||||||
@ -3396,7 +3404,7 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
path->reada = 2;
|
path->reada = READA_FORWARD;
|
||||||
/* clone data */
|
/* clone data */
|
||||||
key.objectid = btrfs_ino(src);
|
key.objectid = btrfs_ino(src);
|
||||||
key.type = BTRFS_EXTENT_DATA_KEY;
|
key.type = BTRFS_EXTENT_DATA_KEY;
|
||||||
@ -4039,7 +4047,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
|
|||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
space_args.total_spaces = 0;
|
space_args.total_spaces = 0;
|
||||||
dest = kmalloc(alloc_size, GFP_NOFS);
|
dest = kmalloc(alloc_size, GFP_KERNEL);
|
||||||
if (!dest)
|
if (!dest)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
dest_orig = dest;
|
dest_orig = dest;
|
||||||
@ -4416,7 +4424,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
|
|||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
size = min_t(u32, loi->size, 64 * 1024);
|
size = min_t(u32, loi->size, SZ_64K);
|
||||||
inodes = init_data_container(size);
|
inodes = init_data_container(size);
|
||||||
if (IS_ERR(inodes)) {
|
if (IS_ERR(inodes)) {
|
||||||
ret = PTR_ERR(inodes);
|
ret = PTR_ERR(inodes);
|
||||||
@ -4565,7 +4573,7 @@ locked:
|
|||||||
goto out_bargs;
|
goto out_bargs;
|
||||||
}
|
}
|
||||||
|
|
||||||
bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
|
bctl = kzalloc(sizeof(*bctl), GFP_KERNEL);
|
||||||
if (!bctl) {
|
if (!bctl) {
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
goto out_bargs;
|
goto out_bargs;
|
||||||
@ -4651,7 +4659,7 @@ static long btrfs_ioctl_balance_progress(struct btrfs_root *root,
|
|||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
bargs = kzalloc(sizeof(*bargs), GFP_NOFS);
|
bargs = kzalloc(sizeof(*bargs), GFP_KERNEL);
|
||||||
if (!bargs) {
|
if (!bargs) {
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
goto out;
|
goto out;
|
||||||
@ -4911,7 +4919,7 @@ static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
|
|||||||
if (!capable(CAP_SYS_ADMIN))
|
if (!capable(CAP_SYS_ADMIN))
|
||||||
return -EPERM;
|
return -EPERM;
|
||||||
|
|
||||||
qsa = kzalloc(sizeof(*qsa), GFP_NOFS);
|
qsa = kzalloc(sizeof(*qsa), GFP_KERNEL);
|
||||||
if (!qsa)
|
if (!qsa)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
@ -5041,7 +5049,7 @@ static long btrfs_ioctl_set_received_subvol_32(struct file *file,
|
|||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
args64 = kmalloc(sizeof(*args64), GFP_NOFS);
|
args64 = kmalloc(sizeof(*args64), GFP_KERNEL);
|
||||||
if (!args64) {
|
if (!args64) {
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
goto out;
|
goto out;
|
||||||
@ -5178,7 +5186,7 @@ out_unlock:
|
|||||||
static int btrfs_ioctl_get_supported_features(struct file *file,
|
static int btrfs_ioctl_get_supported_features(struct file *file,
|
||||||
void __user *arg)
|
void __user *arg)
|
||||||
{
|
{
|
||||||
static struct btrfs_ioctl_feature_flags features[3] = {
|
static const struct btrfs_ioctl_feature_flags features[3] = {
|
||||||
INIT_FEATURE_FLAGS(SUPP),
|
INIT_FEATURE_FLAGS(SUPP),
|
||||||
INIT_FEATURE_FLAGS(SAFE_SET),
|
INIT_FEATURE_FLAGS(SAFE_SET),
|
||||||
INIT_FEATURE_FLAGS(SAFE_CLEAR)
|
INIT_FEATURE_FLAGS(SAFE_CLEAR)
|
||||||
|
@ -56,7 +56,6 @@ void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
|
|||||||
atomic_dec(&eb->spinning_readers);
|
atomic_dec(&eb->spinning_readers);
|
||||||
read_unlock(&eb->lock);
|
read_unlock(&eb->lock);
|
||||||
}
|
}
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -96,7 +95,6 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
|
|||||||
waitqueue_active(&eb->read_lock_wq))
|
waitqueue_active(&eb->read_lock_wq))
|
||||||
wake_up(&eb->read_lock_wq);
|
wake_up(&eb->read_lock_wq);
|
||||||
}
|
}
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -503,7 +503,6 @@ static void cache_rbio(struct btrfs_raid_bio *rbio)
|
|||||||
}
|
}
|
||||||
|
|
||||||
spin_unlock_irqrestore(&table->cache_lock, flags);
|
spin_unlock_irqrestore(&table->cache_lock, flags);
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -906,7 +905,6 @@ static void raid_write_end_io(struct bio *bio)
|
|||||||
err = -EIO;
|
err = -EIO;
|
||||||
|
|
||||||
rbio_orig_end_io(rbio, err);
|
rbio_orig_end_io(rbio, err);
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -708,8 +708,8 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
|
|||||||
err = -ENOMEM;
|
err = -ENOMEM;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
path1->reada = 1;
|
path1->reada = READA_FORWARD;
|
||||||
path2->reada = 2;
|
path2->reada = READA_FORWARD;
|
||||||
|
|
||||||
node = alloc_backref_node(cache);
|
node = alloc_backref_node(cache);
|
||||||
if (!node) {
|
if (!node) {
|
||||||
@ -2130,7 +2130,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
|
|||||||
path = btrfs_alloc_path();
|
path = btrfs_alloc_path();
|
||||||
if (!path)
|
if (!path)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
path->reada = 1;
|
path->reada = READA_FORWARD;
|
||||||
|
|
||||||
reloc_root = root->reloc_root;
|
reloc_root = root->reloc_root;
|
||||||
root_item = &reloc_root->root_item;
|
root_item = &reloc_root->root_item;
|
||||||
@ -3527,7 +3527,7 @@ static int find_data_references(struct reloc_control *rc,
|
|||||||
path = btrfs_alloc_path();
|
path = btrfs_alloc_path();
|
||||||
if (!path)
|
if (!path)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
path->reada = 1;
|
path->reada = READA_FORWARD;
|
||||||
|
|
||||||
root = read_fs_root(rc->extent_root->fs_info, ref_root);
|
root = read_fs_root(rc->extent_root->fs_info, ref_root);
|
||||||
if (IS_ERR(root)) {
|
if (IS_ERR(root)) {
|
||||||
@ -3917,7 +3917,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
|
|||||||
path = btrfs_alloc_path();
|
path = btrfs_alloc_path();
|
||||||
if (!path)
|
if (!path)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
path->reada = 1;
|
path->reada = READA_FORWARD;
|
||||||
|
|
||||||
ret = prepare_to_relocate(rc);
|
ret = prepare_to_relocate(rc);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
@ -4343,7 +4343,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
|
|||||||
path = btrfs_alloc_path();
|
path = btrfs_alloc_path();
|
||||||
if (!path)
|
if (!path)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
path->reada = -1;
|
path->reada = READA_BACK;
|
||||||
|
|
||||||
key.objectid = BTRFS_TREE_RELOC_OBJECTID;
|
key.objectid = BTRFS_TREE_RELOC_OBJECTID;
|
||||||
key.type = BTRFS_ROOT_ITEM_KEY;
|
key.type = BTRFS_ROOT_ITEM_KEY;
|
||||||
|
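The reada conversions here (and in scrub.c below) stop storing raw -1/1/2 readahead hints in struct btrfs_path. Going by the "cleanup, use enum values for btrfs_path reada" patch in the shortlog, the named values are presumably an anonymous enum along these lines (a sketch, not quoted from ctree.h):

/*
 * Readahead direction hints for struct btrfs_path::reada; pairs with
 * the "use smaller type for btrfs_path reada" cleanup that shrinks
 * the field itself.
 */
enum { READA_NONE = 0, READA_BACK, READA_FORWARD };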
@@ -1514,8 +1514,6 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
 
 	if (sblock->no_io_error_seen)
 		scrub_recheck_block_checksum(sblock);
-
-	return;
 }
 
 static inline int scrub_check_fsid(u8 fsid[],
@@ -3507,7 +3505,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 	if (!path)
 		return -ENOMEM;
 
-	path->reada = 2;
+	path->reada = READA_FORWARD;
 	path->search_commit_root = 1;
 	path->skip_locking = 1;
 
@@ -3735,27 +3733,27 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
 	if (fs_info->scrub_workers_refcnt == 0) {
 		if (is_dev_replace)
 			fs_info->scrub_workers =
-				btrfs_alloc_workqueue("btrfs-scrub", flags,
+				btrfs_alloc_workqueue("scrub", flags,
 						      1, 4);
 		else
 			fs_info->scrub_workers =
-				btrfs_alloc_workqueue("btrfs-scrub", flags,
+				btrfs_alloc_workqueue("scrub", flags,
 						      max_active, 4);
 		if (!fs_info->scrub_workers)
 			goto fail_scrub_workers;
 
 		fs_info->scrub_wr_completion_workers =
-			btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
+			btrfs_alloc_workqueue("scrubwrc", flags,
 					      max_active, 2);
 		if (!fs_info->scrub_wr_completion_workers)
 			goto fail_scrub_wr_completion_workers;
 
 		fs_info->scrub_nocow_workers =
-			btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
+			btrfs_alloc_workqueue("scrubnc", flags, 1, 0);
 		if (!fs_info->scrub_nocow_workers)
 			goto fail_scrub_nocow_workers;
 		fs_info->scrub_parity_workers =
-			btrfs_alloc_workqueue("btrfs-scrubparity", flags,
+			btrfs_alloc_workqueue("scrubparity", flags,
 					      max_active, 2);
 		if (!fs_info->scrub_parity_workers)
 			goto fail_scrub_parity_workers;
@@ -4211,7 +4209,7 @@ static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
 
 	io_tree = &BTRFS_I(inode)->io_tree;
 
-	lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
+	lock_extent_bits(io_tree, lockstart, lockend, &cached_state);
 	ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
 	if (ordered) {
 		btrfs_put_ordered_extent(ordered);
@@ -1469,7 +1469,21 @@ static int read_symlink(struct btrfs_root *root,
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 	if (ret < 0)
 		goto out;
-	BUG_ON(ret);
+	if (ret) {
+		/*
+		 * An empty symlink inode. Can happen in rare error paths when
+		 * creating a symlink (transaction committed before the inode
+		 * eviction handler removed the symlink inode items and a crash
+		 * happened in between or the subvol was snapshoted in between).
+		 * Print an informative message to dmesg/syslog so that the user
+		 * can delete the symlink.
+		 */
+		btrfs_err(root->fs_info,
+			  "Found empty symlink inode %llu at root %llu",
+			  ino, root->root_key.objectid);
+		ret = -EIO;
+		goto out;
+	}
 
 	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
 			struct btrfs_file_extent_item);
@@ -22,8 +22,8 @@
 #define BTRFS_SEND_STREAM_MAGIC "btrfs-stream"
 #define BTRFS_SEND_STREAM_VERSION 1
 
-#define BTRFS_SEND_BUF_SIZE (1024 * 64)
-#define BTRFS_SEND_READ_SIZE (1024 * 48)
+#define BTRFS_SEND_BUF_SIZE SZ_64K
+#define BTRFS_SEND_READ_SIZE (48 * SZ_1K)
 
 enum btrfs_tlv_type {
 	BTRFS_TLV_U8,
@@ -295,10 +295,11 @@ enum {
 	Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress,
 	Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
 	Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
-	Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
-	Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_inode_cache,
-	Opt_no_space_cache, Opt_recovery, Opt_skip_balance,
-	Opt_check_integrity, Opt_check_integrity_including_extent_data,
+	Opt_space_cache, Opt_space_cache_version, Opt_clear_cache,
+	Opt_user_subvol_rm_allowed, Opt_enospc_debug, Opt_subvolrootid,
+	Opt_defrag, Opt_inode_cache, Opt_no_space_cache, Opt_recovery,
+	Opt_skip_balance, Opt_check_integrity,
+	Opt_check_integrity_including_extent_data,
 	Opt_check_integrity_print_mask, Opt_fatal_errors, Opt_rescan_uuid_tree,
 	Opt_commit_interval, Opt_barrier, Opt_nodefrag, Opt_nodiscard,
 	Opt_noenospc_debug, Opt_noflushoncommit, Opt_acl, Opt_datacow,
@@ -309,7 +310,7 @@ enum {
 	Opt_err,
 };
 
-static match_table_t tokens = {
+static const match_table_t tokens = {
 	{Opt_degraded, "degraded"},
 	{Opt_subvol, "subvol=%s"},
 	{Opt_subvolid, "subvolid=%s"},
@@ -340,6 +341,7 @@ static match_table_t tokens = {
 	{Opt_discard, "discard"},
 	{Opt_nodiscard, "nodiscard"},
 	{Opt_space_cache, "space_cache"},
+	{Opt_space_cache_version, "space_cache=%s"},
 	{Opt_clear_cache, "clear_cache"},
 	{Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
 	{Opt_enospc_debug, "enospc_debug"},
@@ -383,7 +385,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
 	bool compress_force = false;
 
 	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
-	if (cache_gen)
+	if (btrfs_fs_compat_ro(root->fs_info, FREE_SPACE_TREE))
+		btrfs_set_opt(info->mount_opt, FREE_SPACE_TREE);
+	else if (cache_gen)
 		btrfs_set_opt(info->mount_opt, SPACE_CACHE);
 
 	if (!options)
@@ -617,15 +621,35 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
 					   "turning off discard");
 			break;
 		case Opt_space_cache:
-			btrfs_set_and_info(root, SPACE_CACHE,
-					   "enabling disk space caching");
+		case Opt_space_cache_version:
+			if (token == Opt_space_cache ||
+			    strcmp(args[0].from, "v1") == 0) {
+				btrfs_clear_opt(root->fs_info->mount_opt,
+						FREE_SPACE_TREE);
+				btrfs_set_and_info(root, SPACE_CACHE,
+						   "enabling disk space caching");
+			} else if (strcmp(args[0].from, "v2") == 0) {
+				btrfs_clear_opt(root->fs_info->mount_opt,
+						SPACE_CACHE);
+				btrfs_set_and_info(root, FREE_SPACE_TREE,
+						   "enabling free space tree");
+			} else {
+				ret = -EINVAL;
+				goto out;
+			}
 			break;
 		case Opt_rescan_uuid_tree:
 			btrfs_set_opt(info->mount_opt, RESCAN_UUID_TREE);
 			break;
 		case Opt_no_space_cache:
-			btrfs_clear_and_info(root, SPACE_CACHE,
-					     "disabling disk space caching");
+			if (btrfs_test_opt(root, SPACE_CACHE)) {
+				btrfs_clear_and_info(root, SPACE_CACHE,
+						     "disabling disk space caching");
+			}
+			if (btrfs_test_opt(root, FREE_SPACE_TREE)) {
+				btrfs_clear_and_info(root, FREE_SPACE_TREE,
+						     "disabling free space tree");
+			}
 			break;
 		case Opt_inode_cache:
 			btrfs_set_pending_and_info(info, INODE_MAP_CACHE,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
out:
|
out:
|
||||||
|
if (btrfs_fs_compat_ro(root->fs_info, FREE_SPACE_TREE) &&
|
||||||
|
!btrfs_test_opt(root, FREE_SPACE_TREE) &&
|
||||||
|
!btrfs_test_opt(root, CLEAR_CACHE)) {
|
||||||
|
btrfs_err(root->fs_info, "cannot disable free space tree");
|
||||||
|
ret = -EINVAL;
|
||||||
|
|
||||||
|
}
|
||||||
if (!ret && btrfs_test_opt(root, SPACE_CACHE))
|
if (!ret && btrfs_test_opt(root, SPACE_CACHE))
|
||||||
btrfs_info(root->fs_info, "disk space caching is enabled");
|
btrfs_info(root->fs_info, "disk space caching is enabled");
|
||||||
|
if (!ret && btrfs_test_opt(root, FREE_SPACE_TREE))
|
||||||
|
btrfs_info(root->fs_info, "using free space tree");
|
||||||
kfree(orig);
|
kfree(orig);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
@ -1162,6 +1195,8 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
|
|||||||
seq_puts(seq, ",noacl");
|
seq_puts(seq, ",noacl");
|
||||||
if (btrfs_test_opt(root, SPACE_CACHE))
|
if (btrfs_test_opt(root, SPACE_CACHE))
|
||||||
seq_puts(seq, ",space_cache");
|
seq_puts(seq, ",space_cache");
|
||||||
|
else if (btrfs_test_opt(root, FREE_SPACE_TREE))
|
||||||
|
seq_puts(seq, ",space_cache=v2");
|
||||||
else
|
else
|
||||||
seq_puts(seq, ",nospace_cache");
|
seq_puts(seq, ",nospace_cache");
|
||||||
if (btrfs_test_opt(root, RESCAN_UUID_TREE))
|
if (btrfs_test_opt(root, RESCAN_UUID_TREE))
|
||||||
@ -1863,7 +1898,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
|
|||||||
* btrfs starts at an offset of at least 1MB when doing chunk
|
* btrfs starts at an offset of at least 1MB when doing chunk
|
||||||
* allocation.
|
* allocation.
|
||||||
*/
|
*/
|
||||||
skip_space = 1024 * 1024;
|
skip_space = SZ_1M;
|
||||||
|
|
||||||
/* user can set the offset in fs_info->alloc_start. */
|
/* user can set the offset in fs_info->alloc_start. */
|
||||||
if (fs_info->alloc_start &&
|
if (fs_info->alloc_start &&
|
||||||
@ -1954,6 +1989,8 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
|
|||||||
* there are other factors that may change the result (like a new metadata
|
* there are other factors that may change the result (like a new metadata
|
||||||
* chunk).
|
* chunk).
|
||||||
*
|
*
|
||||||
|
* If metadata is exhausted, f_bavail will be 0.
|
||||||
|
*
|
||||||
* FIXME: not accurate for mixed block groups, total and free/used are ok,
|
* FIXME: not accurate for mixed block groups, total and free/used are ok,
|
||||||
* available appears slightly larger.
|
* available appears slightly larger.
|
||||||
*/
|
*/
|
||||||
@ -1965,11 +2002,13 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
|
|||||||
struct btrfs_space_info *found;
|
struct btrfs_space_info *found;
|
||||||
u64 total_used = 0;
|
u64 total_used = 0;
|
||||||
u64 total_free_data = 0;
|
u64 total_free_data = 0;
|
||||||
|
u64 total_free_meta = 0;
|
||||||
int bits = dentry->d_sb->s_blocksize_bits;
|
int bits = dentry->d_sb->s_blocksize_bits;
|
||||||
__be32 *fsid = (__be32 *)fs_info->fsid;
|
__be32 *fsid = (__be32 *)fs_info->fsid;
|
||||||
unsigned factor = 1;
|
unsigned factor = 1;
|
||||||
struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
|
struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
|
||||||
int ret;
|
int ret;
|
||||||
|
u64 thresh = 0;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* holding chunk_muext to avoid allocating new chunks, holding
|
* holding chunk_muext to avoid allocating new chunks, holding
|
||||||
@ -1995,6 +2034,8 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
|
||||||
|
total_free_meta += found->disk_total - found->disk_used;
|
||||||
|
|
||||||
total_used += found->disk_used;
|
total_used += found->disk_used;
|
||||||
}
|
}
|
||||||
@ -2017,6 +2058,24 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
|
|||||||
buf->f_bavail += div_u64(total_free_data, factor);
|
buf->f_bavail += div_u64(total_free_data, factor);
|
||||||
buf->f_bavail = buf->f_bavail >> bits;
|
buf->f_bavail = buf->f_bavail >> bits;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We calculate the remaining metadata space minus global reserve. If
|
||||||
|
* this is (supposedly) smaller than zero, there's no space. But this
|
||||||
|
* does not hold in practice, the exhausted state happens where's still
|
||||||
|
* some positive delta. So we apply some guesswork and compare the
|
||||||
|
* delta to a 4M threshold. (Practically observed delta was ~2M.)
|
||||||
|
*
|
||||||
|
* We probably cannot calculate the exact threshold value because this
|
||||||
|
* depends on the internal reservations requested by various
|
||||||
|
* operations, so some operations that consume a few metadata will
|
||||||
|
* succeed even if the Avail is zero. But this is better than the other
|
||||||
|
* way around.
|
||||||
|
*/
|
||||||
|
thresh = 4 * 1024 * 1024;
|
||||||
|
|
||||||
|
if (total_free_meta - thresh < block_rsv->size)
|
||||||
|
buf->f_bavail = 0;
|
||||||
|
|
||||||
buf->f_type = BTRFS_SUPER_MAGIC;
|
buf->f_type = BTRFS_SUPER_MAGIC;
|
||||||
buf->f_bsize = dentry->d_sb->s_blocksize;
|
buf->f_bsize = dentry->d_sb->s_blocksize;
|
||||||
buf->f_namelen = BTRFS_NAME_LEN;
|
buf->f_namelen = BTRFS_NAME_LEN;
|
||||||
@ -2223,6 +2282,9 @@ static int btrfs_run_sanity_tests(void)
|
|||||||
if (ret)
|
if (ret)
|
||||||
goto out;
|
goto out;
|
||||||
ret = btrfs_test_qgroups();
|
ret = btrfs_test_qgroups();
|
||||||
|
if (ret)
|
||||||
|
goto out;
|
||||||
|
ret = btrfs_test_free_space_tree();
|
||||||
out:
|
out:
|
||||||
btrfs_destroy_test_fs();
|
btrfs_destroy_test_fs();
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -21,6 +21,9 @@
|
|||||||
#include <linux/magic.h>
|
#include <linux/magic.h>
|
||||||
#include "btrfs-tests.h"
|
#include "btrfs-tests.h"
|
||||||
#include "../ctree.h"
|
#include "../ctree.h"
|
||||||
|
#include "../free-space-cache.h"
|
||||||
|
#include "../free-space-tree.h"
|
||||||
|
#include "../transaction.h"
|
||||||
#include "../volumes.h"
|
#include "../volumes.h"
|
||||||
#include "../disk-io.h"
|
#include "../disk-io.h"
|
||||||
#include "../qgroup.h"
|
#include "../qgroup.h"
|
||||||
@ -122,6 +125,9 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void)
|
|||||||
INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
|
INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
|
||||||
INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
|
INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
|
||||||
INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
|
INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
|
||||||
|
extent_io_tree_init(&fs_info->freed_extents[0], NULL);
|
||||||
|
extent_io_tree_init(&fs_info->freed_extents[1], NULL);
|
||||||
|
fs_info->pinned_extents = &fs_info->freed_extents[0];
|
||||||
return fs_info;
|
return fs_info;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -169,3 +175,55 @@ void btrfs_free_dummy_root(struct btrfs_root *root)
|
|||||||
kfree(root);
|
kfree(root);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
struct btrfs_block_group_cache *
|
||||||
|
btrfs_alloc_dummy_block_group(unsigned long length)
|
||||||
|
{
|
||||||
|
struct btrfs_block_group_cache *cache;
|
||||||
|
|
||||||
|
cache = kzalloc(sizeof(*cache), GFP_NOFS);
|
||||||
|
if (!cache)
|
||||||
|
return NULL;
|
||||||
|
cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
|
||||||
|
GFP_NOFS);
|
||||||
|
if (!cache->free_space_ctl) {
|
||||||
|
kfree(cache);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
cache->fs_info = btrfs_alloc_dummy_fs_info();
|
||||||
|
if (!cache->fs_info) {
|
||||||
|
kfree(cache->free_space_ctl);
|
||||||
|
kfree(cache);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
cache->key.objectid = 0;
|
||||||
|
cache->key.offset = length;
|
||||||
|
cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
|
||||||
|
cache->sectorsize = 4096;
|
||||||
|
cache->full_stripe_len = 4096;
|
||||||
|
|
||||||
|
INIT_LIST_HEAD(&cache->list);
|
||||||
|
INIT_LIST_HEAD(&cache->cluster_list);
|
||||||
|
INIT_LIST_HEAD(&cache->bg_list);
|
||||||
|
btrfs_init_free_space_ctl(cache);
|
||||||
|
mutex_init(&cache->free_space_lock);
|
||||||
|
|
||||||
|
return cache;
|
||||||
|
}
|
||||||
|
|
||||||
|
void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache)
|
||||||
|
{
|
||||||
|
if (!cache)
|
||||||
|
return;
|
||||||
|
__btrfs_remove_free_space_cache(cache->free_space_ctl);
|
||||||
|
kfree(cache->free_space_ctl);
|
||||||
|
kfree(cache);
|
||||||
|
}
|
||||||
|
|
||||||
|
void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans)
|
||||||
|
{
|
||||||
|
memset(trans, 0, sizeof(*trans));
|
||||||
|
trans->transid = 1;
|
||||||
|
INIT_LIST_HEAD(&trans->qgroup_ref_list);
|
||||||
|
trans->type = __TRANS_DUMMY;
|
||||||
|
}
|
||||||
|
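With these shared helpers, a selftest no longer needs its own block group boilerplate (the per-test duplicate in free-space-tests.c is deleted further below). A hypothetical caller, assuming the helpers behave as defined above (demo_dummy_bg is not a function in this diff):

static int demo_dummy_bg(void)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_trans_handle trans;
	int ret;

	/* dummy block group carries only what the free-space code touches */
	cache = btrfs_alloc_dummy_block_group(16 * 1024 * 1024);
	if (!cache)
		return -ENOMEM;
	btrfs_init_dummy_trans(&trans);	/* __TRANS_DUMMY, transid 1 */

	ret = btrfs_add_free_space(cache, 0, SZ_4M);
	btrfs_free_dummy_block_group(cache);
	return ret;
}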
@@ -24,17 +24,23 @@
 #define test_msg(fmt, ...) pr_info("BTRFS: selftest: " fmt, ##__VA_ARGS__)
 
 struct btrfs_root;
+struct btrfs_trans_handle;
 
 int btrfs_test_free_space_cache(void);
 int btrfs_test_extent_buffer_operations(void);
 int btrfs_test_extent_io(void);
 int btrfs_test_inodes(void);
 int btrfs_test_qgroups(void);
+int btrfs_test_free_space_tree(void);
 int btrfs_init_test_fs(void);
 void btrfs_destroy_test_fs(void);
 struct inode *btrfs_new_test_inode(void);
 struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void);
 void btrfs_free_dummy_root(struct btrfs_root *root);
+struct btrfs_block_group_cache *
+btrfs_alloc_dummy_block_group(unsigned long length);
+void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache);
+void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans);
 #else
 static inline int btrfs_test_free_space_cache(void)
 {
@@ -63,6 +69,10 @@ static inline int btrfs_test_qgroups(void)
 {
 	return 0;
 }
+static inline int btrfs_test_free_space_tree(void)
+{
+	return 0;
+}
 #endif
 
 #endif
@@ -18,6 +18,8 @@
 
 #include <linux/pagemap.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/sizes.h>
 #include "btrfs-tests.h"
 #include "../extent_io.h"
 
@@ -70,12 +72,14 @@ static int test_find_delalloc(void)
 	struct page *page;
 	struct page *locked_page = NULL;
 	unsigned long index = 0;
-	u64 total_dirty = 256 * 1024 * 1024;
-	u64 max_bytes = 128 * 1024 * 1024;
+	u64 total_dirty = SZ_256M;
+	u64 max_bytes = SZ_128M;
 	u64 start, end, test_start;
 	u64 found;
 	int ret = -EINVAL;
 
+	test_msg("Running find delalloc tests\n");
+
 	inode = btrfs_new_test_inode();
 	if (!inode) {
 		test_msg("Failed to allocate test inode\n");
@@ -133,7 +137,7 @@ static int test_find_delalloc(void)
 	 *            |--- delalloc ---|
 	 *                    |--- search ---|
 	 */
-	test_start = 64 * 1024 * 1024;
+	test_start = SZ_64M;
 	locked_page = find_lock_page(inode->i_mapping,
 				     test_start >> PAGE_CACHE_SHIFT);
 	if (!locked_page) {
@@ -220,8 +224,8 @@ static int test_find_delalloc(void)
 	 * Now to test where we run into a page that is no longer dirty in the
 	 * range we want to find.
 	 */
-	page = find_get_page(inode->i_mapping, (max_bytes + (1 * 1024 * 1024))
-			     >> PAGE_CACHE_SHIFT);
+	page = find_get_page(inode->i_mapping,
+			     (max_bytes + SZ_1M) >> PAGE_CACHE_SHIFT);
 	if (!page) {
 		test_msg("Couldn't find our page\n");
 		goto out_bits;
@@ -268,8 +272,139 @@ out:
 	return ret;
 }
 
+static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
+			     unsigned long len)
+{
+	unsigned long i, x;
+
+	memset(bitmap, 0, len);
+	memset_extent_buffer(eb, 0, 0, len);
+	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+		test_msg("Bitmap was not zeroed\n");
+		return -EINVAL;
+	}
+
+	bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
+	extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
+	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+		test_msg("Setting all bits failed\n");
+		return -EINVAL;
+	}
+
+	bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
+	extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
+	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+		test_msg("Clearing all bits failed\n");
+		return -EINVAL;
+	}
+
+	bitmap_set(bitmap, (PAGE_CACHE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
+		   sizeof(long) * BITS_PER_BYTE);
+	extent_buffer_bitmap_set(eb, PAGE_CACHE_SIZE - sizeof(long) / 2, 0,
+				 sizeof(long) * BITS_PER_BYTE);
+	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+		test_msg("Setting straddling pages failed\n");
+		return -EINVAL;
+	}
+
+	bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
+	bitmap_clear(bitmap,
+		     (PAGE_CACHE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
+		     sizeof(long) * BITS_PER_BYTE);
+	extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
+	extent_buffer_bitmap_clear(eb, PAGE_CACHE_SIZE - sizeof(long) / 2, 0,
+				   sizeof(long) * BITS_PER_BYTE);
+	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+		test_msg("Clearing straddling pages failed\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Generate a wonky pseudo-random bit pattern for the sake of not using
+	 * something repetitive that could miss some hypothetical off-by-n bug.
+	 */
+	x = 0;
+	for (i = 0; i < len / sizeof(long); i++) {
+		x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffUL;
+		bitmap[i] = x;
+	}
+	write_extent_buffer(eb, bitmap, 0, len);
+
+	for (i = 0; i < len * BITS_PER_BYTE; i++) {
+		int bit, bit1;
+
+		bit = !!test_bit(i, bitmap);
+		bit1 = !!extent_buffer_test_bit(eb, 0, i);
+		if (bit1 != bit) {
+			test_msg("Testing bit pattern failed\n");
+			return -EINVAL;
+		}
+
+		bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
+						i % BITS_PER_BYTE);
+		if (bit1 != bit) {
+			test_msg("Testing bit pattern with offset failed\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int test_eb_bitmaps(void)
+{
+	unsigned long len = PAGE_CACHE_SIZE * 4;
+	unsigned long *bitmap;
+	struct extent_buffer *eb;
+	int ret;
+
+	test_msg("Running extent buffer bitmap tests\n");
+
+	bitmap = kmalloc(len, GFP_NOFS);
+	if (!bitmap) {
+		test_msg("Couldn't allocate test bitmap\n");
+		return -ENOMEM;
+	}
+
+	eb = __alloc_dummy_extent_buffer(NULL, 0, len);
+	if (!eb) {
+		test_msg("Couldn't allocate test extent buffer\n");
+		kfree(bitmap);
+		return -ENOMEM;
+	}
+
+	ret = __test_eb_bitmaps(bitmap, eb, len);
+	if (ret)
+		goto out;
+
+	/* Do it over again with an extent buffer which isn't page-aligned. */
+	free_extent_buffer(eb);
+	eb = __alloc_dummy_extent_buffer(NULL, PAGE_CACHE_SIZE / 2, len);
+	if (!eb) {
+		test_msg("Couldn't allocate test extent buffer\n");
+		kfree(bitmap);
+		return -ENOMEM;
+	}
+
+	ret = __test_eb_bitmaps(bitmap, eb, len);
+out:
+	free_extent_buffer(eb);
+	kfree(bitmap);
+	return ret;
+}
+
 int btrfs_test_extent_io(void)
 {
-	test_msg("Running find delalloc tests\n");
-	return test_find_delalloc();
+	int ret;
+
+	test_msg("Running extent I/O tests\n");
+
+	ret = test_find_delalloc();
+	if (ret)
+		goto out;
+
+	ret = test_eb_bitmaps();
+out:
+	test_msg("Extent I/O tests finished\n");
+	return ret;
 }
|
|||||||
#include "../free-space-cache.h"
|
#include "../free-space-cache.h"
|
||||||
|
|
||||||
#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
|
#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
|
||||||
static struct btrfs_block_group_cache *init_test_block_group(void)
|
|
||||||
{
|
|
||||||
struct btrfs_block_group_cache *cache;
|
|
||||||
|
|
||||||
cache = kzalloc(sizeof(*cache), GFP_NOFS);
|
|
||||||
if (!cache)
|
|
||||||
return NULL;
|
|
||||||
cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
|
|
||||||
GFP_NOFS);
|
|
||||||
if (!cache->free_space_ctl) {
|
|
||||||
kfree(cache);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
cache->fs_info = btrfs_alloc_dummy_fs_info();
|
|
||||||
if (!cache->fs_info) {
|
|
||||||
kfree(cache->free_space_ctl);
|
|
||||||
kfree(cache);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
cache->key.objectid = 0;
|
|
||||||
cache->key.offset = 1024 * 1024 * 1024;
|
|
||||||
cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
|
|
||||||
cache->sectorsize = 4096;
|
|
||||||
cache->full_stripe_len = 4096;
|
|
||||||
|
|
||||||
spin_lock_init(&cache->lock);
|
|
||||||
INIT_LIST_HEAD(&cache->list);
|
|
||||||
INIT_LIST_HEAD(&cache->cluster_list);
|
|
||||||
INIT_LIST_HEAD(&cache->bg_list);
|
|
||||||
|
|
||||||
btrfs_init_free_space_ctl(cache);
|
|
||||||
|
|
||||||
return cache;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* This test just does basic sanity checking, making sure we can add an exten
|
* This test just does basic sanity checking, making sure we can add an exten
|
||||||
@ -71,59 +36,59 @@ static int test_extents(struct btrfs_block_group_cache *cache)
|
|||||||
test_msg("Running extent only tests\n");
|
test_msg("Running extent only tests\n");
|
||||||
|
|
||||||
/* First just make sure we can remove an entire entry */
|
/* First just make sure we can remove an entire entry */
|
||||||
ret = btrfs_add_free_space(cache, 0, 4 * 1024 * 1024);
|
ret = btrfs_add_free_space(cache, 0, SZ_4M);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
test_msg("Error adding initial extents %d\n", ret);
|
test_msg("Error adding initial extents %d\n", ret);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = btrfs_remove_free_space(cache, 0, 4 * 1024 * 1024);
|
ret = btrfs_remove_free_space(cache, 0, SZ_4M);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
test_msg("Error removing extent %d\n", ret);
|
test_msg("Error removing extent %d\n", ret);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (test_check_exists(cache, 0, 4 * 1024 * 1024)) {
|
if (test_check_exists(cache, 0, SZ_4M)) {
|
||||||
test_msg("Full remove left some lingering space\n");
|
test_msg("Full remove left some lingering space\n");
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Ok edge and middle cases now */
|
/* Ok edge and middle cases now */
|
||||||
ret = btrfs_add_free_space(cache, 0, 4 * 1024 * 1024);
|
ret = btrfs_add_free_space(cache, 0, SZ_4M);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
test_msg("Error adding half extent %d\n", ret);
|
test_msg("Error adding half extent %d\n", ret);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = btrfs_remove_free_space(cache, 3 * 1024 * 1024, 1 * 1024 * 1024);
|
ret = btrfs_remove_free_space(cache, 3 * SZ_1M, SZ_1M);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
test_msg("Error removing tail end %d\n", ret);
|
test_msg("Error removing tail end %d\n", ret);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = btrfs_remove_free_space(cache, 0, 1 * 1024 * 1024);
|
ret = btrfs_remove_free_space(cache, 0, SZ_1M);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
test_msg("Error removing front end %d\n", ret);
|
test_msg("Error removing front end %d\n", ret);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = btrfs_remove_free_space(cache, 2 * 1024 * 1024, 4096);
|
ret = btrfs_remove_free_space(cache, SZ_2M, 4096);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
test_msg("Error removing middle piece %d\n", ret);
|
test_msg("Error removing middle piece %d\n", ret);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (test_check_exists(cache, 0, 1 * 1024 * 1024)) {
|
if (test_check_exists(cache, 0, SZ_1M)) {
|
||||||
test_msg("Still have space at the front\n");
|
test_msg("Still have space at the front\n");
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (test_check_exists(cache, 2 * 1024 * 1024, 4096)) {
|
if (test_check_exists(cache, SZ_2M, 4096)) {
|
||||||
test_msg("Still have space in the middle\n");
|
test_msg("Still have space in the middle\n");
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (test_check_exists(cache, 3 * 1024 * 1024, 1 * 1024 * 1024)) {
|
if (test_check_exists(cache, 3 * SZ_1M, SZ_1M)) {
|
||||||
test_msg("Still have space at the end\n");
|
test_msg("Still have space at the end\n");
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
@ -141,30 +106,30 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache)
|
|||||||
|
|
||||||
test_msg("Running bitmap only tests\n");
|
test_msg("Running bitmap only tests\n");
|
||||||
|
|
||||||
ret = test_add_free_space_entry(cache, 0, 4 * 1024 * 1024, 1);
|
ret = test_add_free_space_entry(cache, 0, SZ_4M, 1);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
test_msg("Couldn't create a bitmap entry %d\n", ret);
|
test_msg("Couldn't create a bitmap entry %d\n", ret);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = btrfs_remove_free_space(cache, 0, 4 * 1024 * 1024);
|
ret = btrfs_remove_free_space(cache, 0, SZ_4M);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
test_msg("Error removing bitmap full range %d\n", ret);
|
test_msg("Error removing bitmap full range %d\n", ret);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (test_check_exists(cache, 0, 4 * 1024 * 1024)) {
|
if (test_check_exists(cache, 0, SZ_4M)) {
|
||||||
test_msg("Left some space in bitmap\n");
|
test_msg("Left some space in bitmap\n");
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = test_add_free_space_entry(cache, 0, 4 * 1024 * 1024, 1);
|
ret = test_add_free_space_entry(cache, 0, SZ_4M, 1);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
test_msg("Couldn't add to our bitmap entry %d\n", ret);
|
test_msg("Couldn't add to our bitmap entry %d\n", ret);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = btrfs_remove_free_space(cache, 1 * 1024 * 1024, 2 * 1024 * 1024);
|
ret = btrfs_remove_free_space(cache, SZ_1M, SZ_2M);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
test_msg("Couldn't remove middle chunk %d\n", ret);
|
test_msg("Couldn't remove middle chunk %d\n", ret);
|
||||||
return ret;
|
return ret;
|
||||||
@ -177,23 +142,21 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache)
|
|||||||
next_bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
|
next_bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
|
||||||
|
|
||||||
/* Test a bit straddling two bitmaps */
|
/* Test a bit straddling two bitmaps */
|
||||||
ret = test_add_free_space_entry(cache, next_bitmap_offset -
|
ret = test_add_free_space_entry(cache, next_bitmap_offset - SZ_2M,
|
||||||
(2 * 1024 * 1024), 4 * 1024 * 1024, 1);
|
SZ_4M, 1);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
test_msg("Couldn't add space that straddles two bitmaps %d\n",
|
test_msg("Couldn't add space that straddles two bitmaps %d\n",
|
||||||
ret);
|
ret);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = btrfs_remove_free_space(cache, next_bitmap_offset -
|
ret = btrfs_remove_free_space(cache, next_bitmap_offset - SZ_1M, SZ_2M);
|
||||||
(1 * 1024 * 1024), 2 * 1024 * 1024);
|
|
||||||
if (ret) {
|
if (ret) {
|
||||||
test_msg("Couldn't remove overlapping space %d\n", ret);
|
test_msg("Couldn't remove overlapping space %d\n", ret);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (test_check_exists(cache, next_bitmap_offset - (1 * 1024 * 1024),
|
if (test_check_exists(cache, next_bitmap_offset - SZ_1M, SZ_2M)) {
|
||||||
2 * 1024 * 1024)) {
|
|
||||||
test_msg("Left some space when removing overlapping\n");
|
test_msg("Left some space when removing overlapping\n");
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
@ -216,43 +179,43 @@ static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
|
|||||||
* bitmap, but the free space completely in the extent and then
|
* bitmap, but the free space completely in the extent and then
|
||||||
* completely in the bitmap.
|
* completely in the bitmap.
|
||||||
*/
|
*/
|
||||||
ret = test_add_free_space_entry(cache, 4 * 1024 * 1024, 1 * 1024 * 1024, 1);
|
ret = test_add_free_space_entry(cache, SZ_4M, SZ_1M, 1);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
test_msg("Couldn't create bitmap entry %d\n", ret);
|
test_msg("Couldn't create bitmap entry %d\n", ret);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = test_add_free_space_entry(cache, 0, 1 * 1024 * 1024, 0);
|
ret = test_add_free_space_entry(cache, 0, SZ_1M, 0);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
test_msg("Couldn't add extent entry %d\n", ret);
|
test_msg("Couldn't add extent entry %d\n", ret);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = btrfs_remove_free_space(cache, 0, 1 * 1024 * 1024);
|
ret = btrfs_remove_free_space(cache, 0, SZ_1M);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
test_msg("Couldn't remove extent entry %d\n", ret);
|
test_msg("Couldn't remove extent entry %d\n", ret);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (test_check_exists(cache, 0, 1 * 1024 * 1024)) {
|
if (test_check_exists(cache, 0, SZ_1M)) {
|
||||||
test_msg("Left remnants after our remove\n");
|
test_msg("Left remnants after our remove\n");
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Now to add back the extent entry and remove from the bitmap */
|
/* Now to add back the extent entry and remove from the bitmap */
|
||||||
ret = test_add_free_space_entry(cache, 0, 1 * 1024 * 1024, 0);
|
ret = test_add_free_space_entry(cache, 0, SZ_1M, 0);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
test_msg("Couldn't re-add extent entry %d\n", ret);
|
test_msg("Couldn't re-add extent entry %d\n", ret);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = btrfs_remove_free_space(cache, 4 * 1024 * 1024, 1 * 1024 * 1024);
|
ret = btrfs_remove_free_space(cache, SZ_4M, SZ_1M);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
test_msg("Couldn't remove from bitmap %d\n", ret);
|
test_msg("Couldn't remove from bitmap %d\n", ret);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (test_check_exists(cache, 4 * 1024 * 1024, 1 * 1024 * 1024)) {
|
if (test_check_exists(cache, SZ_4M, SZ_1M)) {
|
||||||
test_msg("Left remnants in the bitmap\n");
|
test_msg("Left remnants in the bitmap\n");
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
@@ -261,19 +224,19 @@ static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
 	 * Ok so a little more evil, extent entry and bitmap at the same offset,
 	 * removing an overlapping chunk.
 	 */
-	ret = test_add_free_space_entry(cache, 1 * 1024 * 1024, 4 * 1024 * 1024, 1);
+	ret = test_add_free_space_entry(cache, SZ_1M, SZ_4M, 1);
 	if (ret) {
 		test_msg("Couldn't add to a bitmap %d\n", ret);
 		return ret;
 	}

-	ret = btrfs_remove_free_space(cache, 512 * 1024, 3 * 1024 * 1024);
+	ret = btrfs_remove_free_space(cache, SZ_512K, 3 * SZ_1M);
 	if (ret) {
 		test_msg("Couldn't remove overlapping space %d\n", ret);
 		return ret;
 	}

-	if (test_check_exists(cache, 512 * 1024, 3 * 1024 * 1024)) {
+	if (test_check_exists(cache, SZ_512K, 3 * SZ_1M)) {
 		test_msg("Left over pieces after removing overlapping\n");
 		return -1;
 	}
@@ -281,25 +244,25 @@ static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
 	__btrfs_remove_free_space_cache(cache->free_space_ctl);

 	/* Now with the extent entry offset into the bitmap */
-	ret = test_add_free_space_entry(cache, 4 * 1024 * 1024, 4 * 1024 * 1024, 1);
+	ret = test_add_free_space_entry(cache, SZ_4M, SZ_4M, 1);
 	if (ret) {
 		test_msg("Couldn't add space to the bitmap %d\n", ret);
 		return ret;
 	}

-	ret = test_add_free_space_entry(cache, 2 * 1024 * 1024, 2 * 1024 * 1024, 0);
+	ret = test_add_free_space_entry(cache, SZ_2M, SZ_2M, 0);
 	if (ret) {
 		test_msg("Couldn't add extent to the cache %d\n", ret);
 		return ret;
 	}

-	ret = btrfs_remove_free_space(cache, 3 * 1024 * 1024, 4 * 1024 * 1024);
+	ret = btrfs_remove_free_space(cache, 3 * SZ_1M, SZ_4M);
 	if (ret) {
 		test_msg("Problem removing overlapping space %d\n", ret);
 		return ret;
 	}

-	if (test_check_exists(cache, 3 * 1024 * 1024, 4 * 1024 * 1024)) {
+	if (test_check_exists(cache, 3 * SZ_1M, SZ_4M)) {
 		test_msg("Left something behind when removing space");
 		return -1;
 	}
@@ -315,29 +278,26 @@ static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
 	 * [ del ]
 	 */
 	__btrfs_remove_free_space_cache(cache->free_space_ctl);
-	ret = test_add_free_space_entry(cache, bitmap_offset + 4 * 1024 * 1024,
-					4 * 1024 * 1024, 1);
+	ret = test_add_free_space_entry(cache, bitmap_offset + SZ_4M, SZ_4M, 1);
 	if (ret) {
 		test_msg("Couldn't add bitmap %d\n", ret);
 		return ret;
 	}

-	ret = test_add_free_space_entry(cache, bitmap_offset - 1 * 1024 * 1024,
-					5 * 1024 * 1024, 0);
+	ret = test_add_free_space_entry(cache, bitmap_offset - SZ_1M,
+					5 * SZ_1M, 0);
 	if (ret) {
 		test_msg("Couldn't add extent entry %d\n", ret);
 		return ret;
 	}

-	ret = btrfs_remove_free_space(cache, bitmap_offset + 1 * 1024 * 1024,
-				      5 * 1024 * 1024);
+	ret = btrfs_remove_free_space(cache, bitmap_offset + SZ_1M, 5 * SZ_1M);
 	if (ret) {
 		test_msg("Failed to free our space %d\n", ret);
 		return ret;
 	}

-	if (test_check_exists(cache, bitmap_offset + 1 * 1024 * 1024,
-			      5 * 1024 * 1024)) {
+	if (test_check_exists(cache, bitmap_offset + SZ_1M, 5 * SZ_1M)) {
 		test_msg("Left stuff over\n");
 		return -1;
 	}
@@ -350,19 +310,19 @@ static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
 	 * to return -EAGAIN back from btrfs_remove_extent, make sure this
 	 * doesn't happen.
 	 */
-	ret = test_add_free_space_entry(cache, 1 * 1024 * 1024, 2 * 1024 * 1024, 1);
+	ret = test_add_free_space_entry(cache, SZ_1M, SZ_2M, 1);
 	if (ret) {
 		test_msg("Couldn't add bitmap entry %d\n", ret);
 		return ret;
 	}

-	ret = test_add_free_space_entry(cache, 3 * 1024 * 1024, 1 * 1024 * 1024, 0);
+	ret = test_add_free_space_entry(cache, 3 * SZ_1M, SZ_1M, 0);
 	if (ret) {
 		test_msg("Couldn't add extent entry %d\n", ret);
 		return ret;
 	}

-	ret = btrfs_remove_free_space(cache, 1 * 1024 * 1024, 3 * 1024 * 1024);
+	ret = btrfs_remove_free_space(cache, SZ_1M, 3 * SZ_1M);
 	if (ret) {
 		test_msg("Error removing bitmap and extent overlapping %d\n", ret);
 		return ret;
@@ -445,9 +405,11 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	int ret;
 	u64 offset;
 	u64 max_extent_size;
-	bool (*use_bitmap_op)(struct btrfs_free_space_ctl *,
-			      struct btrfs_free_space *);
+	const struct btrfs_free_space_op test_free_space_ops = {
+		.recalc_thresholds = cache->free_space_ctl->op->recalc_thresholds,
+		.use_bitmap = test_use_bitmap,
+	};
+	const struct btrfs_free_space_op *orig_free_space_ops;

 	test_msg("Running space stealing from bitmap to extent\n");

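Because "btrfs: constify remaining structs with function pointers" makes struct btrfs_free_space_op const, the test can no longer patch a single callback in place; it now declares a full replacement ops struct and swaps the pointer, restoring the original at the end ("btrfs tests: replace whole ops structure for free space tests" in the shortlog). A minimal sketch of that save/swap/restore pattern, with simplified names rather than the btrfs types:

struct ops {
	void (*hook)(void);
};

static void test_hook(void)
{
	/* instrumented behaviour used only by the test */
}

static const struct ops test_ops = {
	.hook = test_hook,
};

static void with_test_ops(const struct ops **slot)
{
	const struct ops *orig = *slot;	/* remember the real ops */

	*slot = &test_ops;		/* the struct is const, the pointer is not */
	/* ... exercise the code under test ... */
	*slot = orig;			/* restore */
}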
@@ -469,22 +431,21 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	 * that forces use of bitmaps as soon as we have at least 1
 	 * extent entry.
 	 */
-	use_bitmap_op = cache->free_space_ctl->op->use_bitmap;
-	cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
+	orig_free_space_ops = cache->free_space_ctl->op;
+	cache->free_space_ctl->op = &test_free_space_ops;

 	/*
 	 * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
 	 */
-	ret = test_add_free_space_entry(cache, 128 * 1024 * 1024 - 256 * 1024,
-					128 * 1024, 0);
+	ret = test_add_free_space_entry(cache, SZ_128M - SZ_256K, SZ_128K, 0);
 	if (ret) {
 		test_msg("Couldn't add extent entry %d\n", ret);
 		return ret;
 	}

 	/* Bitmap entry covering free space range [128Mb + 512Kb, 256Mb[ */
-	ret = test_add_free_space_entry(cache, 128 * 1024 * 1024 + 512 * 1024,
-					128 * 1024 * 1024 - 512 * 1024, 1);
+	ret = test_add_free_space_entry(cache, SZ_128M + SZ_512K,
+					SZ_128M - SZ_512K, 1);
 	if (ret) {
 		test_msg("Couldn't add bitmap entry %d\n", ret);
 		return ret;
@@ -502,21 +463,19 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	 * [128Mb + 512Kb, 128Mb + 768Kb[
 	 */
 	ret = btrfs_remove_free_space(cache,
-				      128 * 1024 * 1024 + 768 * 1024,
-				      128 * 1024 * 1024 - 768 * 1024);
+				      SZ_128M + 768 * SZ_1K,
+				      SZ_128M - 768 * SZ_1K);
 	if (ret) {
 		test_msg("Failed to free part of bitmap space %d\n", ret);
 		return ret;
 	}

 	/* Confirm that only those 2 ranges are marked as free. */
-	if (!test_check_exists(cache, 128 * 1024 * 1024 - 256 * 1024,
-			       128 * 1024)) {
+	if (!test_check_exists(cache, SZ_128M - SZ_256K, SZ_128K)) {
 		test_msg("Free space range missing\n");
 		return -ENOENT;
 	}
-	if (!test_check_exists(cache, 128 * 1024 * 1024 + 512 * 1024,
-			       256 * 1024)) {
+	if (!test_check_exists(cache, SZ_128M + SZ_512K, SZ_256K)) {
 		test_msg("Free space range missing\n");
 		return -ENOENT;
 	}
@@ -525,8 +484,8 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	 * Confirm that the bitmap range [128Mb + 768Kb, 256Mb[ isn't marked
 	 * as free anymore.
 	 */
-	if (test_check_exists(cache, 128 * 1024 * 1024 + 768 * 1024,
-			      128 * 1024 * 1024 - 768 * 1024)) {
+	if (test_check_exists(cache, SZ_128M + 768 * SZ_1K,
+			      SZ_128M - 768 * SZ_1K)) {
 		test_msg("Bitmap region not removed from space cache\n");
 		return -EINVAL;
 	}
@@ -535,8 +494,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	 * Confirm that the region [128Mb + 256Kb, 128Mb + 512Kb[, which is
 	 * covered by the bitmap, isn't marked as free.
 	 */
-	if (test_check_exists(cache, 128 * 1024 * 1024 + 256 * 1024,
-			      256 * 1024)) {
+	if (test_check_exists(cache, SZ_128M + SZ_256K, SZ_256K)) {
 		test_msg("Invalid bitmap region marked as free\n");
 		return -EINVAL;
 	}
@@ -545,8 +503,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	 * Confirm that the region [128Mb, 128Mb + 256Kb[, which is covered
 	 * by the bitmap too, isn't marked as free either.
 	 */
-	if (test_check_exists(cache, 128 * 1024 * 1024,
-			      256 * 1024)) {
+	if (test_check_exists(cache, SZ_128M, SZ_256K)) {
 		test_msg("Invalid bitmap region marked as free\n");
 		return -EINVAL;
 	}
@@ -556,13 +513,13 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	 * lets make sure the free space cache marks it as free in the bitmap,
 	 * and doesn't insert a new extent entry to represent this region.
 	 */
-	ret = btrfs_add_free_space(cache, 128 * 1024 * 1024, 512 * 1024);
+	ret = btrfs_add_free_space(cache, SZ_128M, SZ_512K);
 	if (ret) {
 		test_msg("Error adding free space: %d\n", ret);
 		return ret;
 	}
 	/* Confirm the region is marked as free. */
-	if (!test_check_exists(cache, 128 * 1024 * 1024, 512 * 1024)) {
+	if (!test_check_exists(cache, SZ_128M, SZ_512K)) {
 		test_msg("Bitmap region not marked as free\n");
 		return -ENOENT;
 	}
@@ -581,8 +538,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	 * The goal is to test that the bitmap entry space stealing doesn't
 	 * steal this space region.
 	 */
-	ret = btrfs_add_free_space(cache, 128 * 1024 * 1024 + 16 * 1024 * 1024,
-				   4096);
+	ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, 4096);
 	if (ret) {
 		test_msg("Error adding free space: %d\n", ret);
 		return ret;
@@ -601,15 +557,13 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	 * expand the range covered by the existing extent entry that represents
 	 * the free space [128Mb - 256Kb, 128Mb - 128Kb[.
 	 */
-	ret = btrfs_add_free_space(cache, 128 * 1024 * 1024 - 128 * 1024,
-				   128 * 1024);
+	ret = btrfs_add_free_space(cache, SZ_128M - SZ_128K, SZ_128K);
 	if (ret) {
 		test_msg("Error adding free space: %d\n", ret);
 		return ret;
 	}
 	/* Confirm the region is marked as free. */
-	if (!test_check_exists(cache, 128 * 1024 * 1024 - 128 * 1024,
-			       128 * 1024)) {
+	if (!test_check_exists(cache, SZ_128M - SZ_128K, SZ_128K)) {
 		test_msg("Extent region not marked as free\n");
 		return -ENOENT;
 	}
@@ -637,21 +591,20 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	 * that represents the 1Mb free space, and therefore we're able to
 	 * allocate the whole free space at once.
 	 */
-	if (!test_check_exists(cache, 128 * 1024 * 1024 - 256 * 1024,
-			       1 * 1024 * 1024)) {
+	if (!test_check_exists(cache, SZ_128M - SZ_256K, SZ_1M)) {
 		test_msg("Expected region not marked as free\n");
 		return -ENOENT;
 	}

-	if (cache->free_space_ctl->free_space != (1 * 1024 * 1024 + 4096)) {
+	if (cache->free_space_ctl->free_space != (SZ_1M + 4096)) {
 		test_msg("Cache free space is not 1Mb + 4Kb\n");
 		return -EINVAL;
 	}

 	offset = btrfs_find_space_for_alloc(cache,
-					    0, 1 * 1024 * 1024, 0,
+					    0, SZ_1M, 0,
 					    &max_extent_size);
-	if (offset != (128 * 1024 * 1024 - 256 * 1024)) {
+	if (offset != (SZ_128M - SZ_256K)) {
 		test_msg("Failed to allocate 1Mb from space cache, returned offset is: %llu\n",
 			 offset);
 		return -EINVAL;
@@ -670,7 +623,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	offset = btrfs_find_space_for_alloc(cache,
 					    0, 4096, 0,
 					    &max_extent_size);
-	if (offset != (128 * 1024 * 1024 + 16 * 1024 * 1024)) {
+	if (offset != (SZ_128M + SZ_16M)) {
 		test_msg("Failed to allocate 4Kb from space cache, returned offset is: %llu\n",
 			 offset);
 		return -EINVAL;
@@ -691,16 +644,14 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	/*
 	 * Extent entry covering free space range [128Mb + 128Kb, 128Mb + 256Kb[
 	 */
-	ret = test_add_free_space_entry(cache, 128 * 1024 * 1024 + 128 * 1024,
-					128 * 1024, 0);
+	ret = test_add_free_space_entry(cache, SZ_128M + SZ_128K, SZ_128K, 0);
 	if (ret) {
 		test_msg("Couldn't add extent entry %d\n", ret);
 		return ret;
 	}

 	/* Bitmap entry covering free space range [0, 128Mb - 512Kb[ */
-	ret = test_add_free_space_entry(cache, 0,
-					128 * 1024 * 1024 - 512 * 1024, 1);
+	ret = test_add_free_space_entry(cache, 0, SZ_128M - SZ_512K, 1);
 	if (ret) {
 		test_msg("Couldn't add bitmap entry %d\n", ret);
 		return ret;
@@ -717,22 +668,18 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	 * [128Mb + 128b, 128Mb + 256Kb[
 	 * [128Mb - 768Kb, 128Mb - 512Kb[
 	 */
-	ret = btrfs_remove_free_space(cache,
-				      0,
-				      128 * 1024 * 1024 - 768 * 1024);
+	ret = btrfs_remove_free_space(cache, 0, SZ_128M - 768 * SZ_1K);
 	if (ret) {
 		test_msg("Failed to free part of bitmap space %d\n", ret);
 		return ret;
 	}

 	/* Confirm that only those 2 ranges are marked as free. */
-	if (!test_check_exists(cache, 128 * 1024 * 1024 + 128 * 1024,
-			       128 * 1024)) {
+	if (!test_check_exists(cache, SZ_128M + SZ_128K, SZ_128K)) {
 		test_msg("Free space range missing\n");
 		return -ENOENT;
 	}
-	if (!test_check_exists(cache, 128 * 1024 * 1024 - 768 * 1024,
-			       256 * 1024)) {
+	if (!test_check_exists(cache, SZ_128M - 768 * SZ_1K, SZ_256K)) {
 		test_msg("Free space range missing\n");
 		return -ENOENT;
 	}
@@ -741,8 +688,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	 * Confirm that the bitmap range [0, 128Mb - 768Kb[ isn't marked
 	 * as free anymore.
 	 */
-	if (test_check_exists(cache, 0,
-			      128 * 1024 * 1024 - 768 * 1024)) {
+	if (test_check_exists(cache, 0, SZ_128M - 768 * SZ_1K)) {
 		test_msg("Bitmap region not removed from space cache\n");
 		return -EINVAL;
 	}
@@ -751,8 +697,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	 * Confirm that the region [128Mb - 512Kb, 128Mb[, which is
 	 * covered by the bitmap, isn't marked as free.
 	 */
-	if (test_check_exists(cache, 128 * 1024 * 1024 - 512 * 1024,
-			      512 * 1024)) {
+	if (test_check_exists(cache, SZ_128M - SZ_512K, SZ_512K)) {
 		test_msg("Invalid bitmap region marked as free\n");
 		return -EINVAL;
 	}
@@ -762,15 +707,13 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	 * lets make sure the free space cache marks it as free in the bitmap,
 	 * and doesn't insert a new extent entry to represent this region.
 	 */
-	ret = btrfs_add_free_space(cache, 128 * 1024 * 1024 - 512 * 1024,
-				   512 * 1024);
+	ret = btrfs_add_free_space(cache, SZ_128M - SZ_512K, SZ_512K);
 	if (ret) {
 		test_msg("Error adding free space: %d\n", ret);
 		return ret;
 	}
 	/* Confirm the region is marked as free. */
-	if (!test_check_exists(cache, 128 * 1024 * 1024 - 512 * 1024,
-			       512 * 1024)) {
+	if (!test_check_exists(cache, SZ_128M - SZ_512K, SZ_512K)) {
 		test_msg("Bitmap region not marked as free\n");
 		return -ENOENT;
 	}
@@ -789,7 +732,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	 * The goal is to test that the bitmap entry space stealing doesn't
 	 * steal this space region.
 	 */
-	ret = btrfs_add_free_space(cache, 32 * 1024 * 1024, 8192);
+	ret = btrfs_add_free_space(cache, SZ_32M, 8192);
 	if (ret) {
 		test_msg("Error adding free space: %d\n", ret);
 		return ret;
@@ -800,13 +743,13 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	 * expand the range covered by the existing extent entry that represents
 	 * the free space [128Mb + 128Kb, 128Mb + 256Kb[.
 	 */
-	ret = btrfs_add_free_space(cache, 128 * 1024 * 1024, 128 * 1024);
+	ret = btrfs_add_free_space(cache, SZ_128M, SZ_128K);
 	if (ret) {
 		test_msg("Error adding free space: %d\n", ret);
 		return ret;
 	}
 	/* Confirm the region is marked as free. */
-	if (!test_check_exists(cache, 128 * 1024 * 1024, 128 * 1024)) {
+	if (!test_check_exists(cache, SZ_128M, SZ_128K)) {
 		test_msg("Extent region not marked as free\n");
 		return -ENOENT;
 	}
@@ -834,21 +777,19 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	 * that represents the 1Mb free space, and therefore we're able to
 	 * allocate the whole free space at once.
 	 */
-	if (!test_check_exists(cache, 128 * 1024 * 1024 - 768 * 1024,
-			       1 * 1024 * 1024)) {
+	if (!test_check_exists(cache, SZ_128M - 768 * SZ_1K, SZ_1M)) {
 		test_msg("Expected region not marked as free\n");
 		return -ENOENT;
 	}

-	if (cache->free_space_ctl->free_space != (1 * 1024 * 1024 + 8192)) {
+	if (cache->free_space_ctl->free_space != (SZ_1M + 8192)) {
 		test_msg("Cache free space is not 1Mb + 8Kb\n");
 		return -EINVAL;
 	}

-	offset = btrfs_find_space_for_alloc(cache,
-					    0, 1 * 1024 * 1024, 0,
+	offset = btrfs_find_space_for_alloc(cache, 0, SZ_1M, 0,
 					    &max_extent_size);
-	if (offset != (128 * 1024 * 1024 - 768 * 1024)) {
+	if (offset != (SZ_128M - 768 * SZ_1K)) {
 		test_msg("Failed to allocate 1Mb from space cache, returned offset is: %llu\n",
 			 offset);
 		return -EINVAL;
@@ -867,7 +808,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	offset = btrfs_find_space_for_alloc(cache,
 					    0, 8192, 0,
 					    &max_extent_size);
-	if (offset != (32 * 1024 * 1024)) {
+	if (offset != SZ_32M) {
 		test_msg("Failed to allocate 8Kb from space cache, returned offset is: %llu\n",
 			 offset);
 		return -EINVAL;
@@ -877,7 +818,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	if (ret)
 		return ret;

-	cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
+	cache->free_space_ctl->op = orig_free_space_ops;
 	__btrfs_remove_free_space_cache(cache->free_space_ctl);

 	return 0;
@@ -891,7 +832,7 @@ int btrfs_test_free_space_cache(void)

 	test_msg("Running btrfs free space cache tests\n");

-	cache = init_test_block_group();
+	cache = btrfs_alloc_dummy_block_group(1024 * 1024 * 1024);
 	if (!cache) {
 		test_msg("Couldn't run the tests\n");
 		return 0;
@@ -922,9 +863,7 @@ int btrfs_test_free_space_cache(void)

 	ret = test_steal_space_from_bitmap_to_extent(cache);
 out:
-	__btrfs_remove_free_space_cache(cache->free_space_ctl);
-	kfree(cache->free_space_ctl);
-	kfree(cache);
+	btrfs_free_dummy_block_group(cache);
 	btrfs_free_dummy_root(root);
 	test_msg("Free space cache tests finished\n");
 	return ret;
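The setup and teardown that used to be open coded here move into shared helpers so the new free space tree tests can reuse them. The freeing side is visible in the hunk above; a sketch of what the helper pair plausibly looks like (the real implementations live in fs/btrfs/tests/btrfs-tests.c and carry more initialization than shown; the allocation body here is an illustration, not the kernel code):

struct btrfs_block_group_cache *btrfs_alloc_dummy_block_group(unsigned long length)
{
	struct btrfs_block_group_cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return NULL;
	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_NOFS);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return NULL;
	}
	cache->key.objectid = 0;
	cache->key.offset = length;	/* block group spans [0, length) */
	cache->sectorsize = 4096;
	btrfs_init_free_space_ctl(cache);
	return cache;
}

void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache)
{
	if (!cache)
		return;
	/* Same steps the old open-coded teardown above performed. */
	__btrfs_remove_free_space_cache(cache->free_space_ctl);
	kfree(cache->free_space_ctl);
	kfree(cache);
}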
fs/btrfs/tests/free-space-tree-tests.c (new file, 571 lines)
@@ -0,0 +1,571 @@
+/*
+ * Copyright (C) 2015 Facebook. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include "btrfs-tests.h"
+#include "../ctree.h"
+#include "../disk-io.h"
+#include "../free-space-tree.h"
+#include "../transaction.h"
+
+struct free_space_extent {
+	u64 start, length;
+};
+
+/*
+ * The test cases align their operations to this in order to hit some of the
+ * edge cases in the bitmap code.
+ */
+#define BITMAP_RANGE (BTRFS_FREE_SPACE_BITMAP_BITS * 4096)
+
+static int __check_free_space_extents(struct btrfs_trans_handle *trans,
+				      struct btrfs_fs_info *fs_info,
+				      struct btrfs_block_group_cache *cache,
+				      struct btrfs_path *path,
+				      struct free_space_extent *extents,
+				      unsigned int num_extents)
+{
+	struct btrfs_free_space_info *info;
+	struct btrfs_key key;
+	int prev_bit = 0, bit;
+	u64 extent_start = 0, offset, end;
+	u32 flags, extent_count;
+	unsigned int i;
+	int ret;
+
+	info = search_free_space_info(trans, fs_info, cache, path, 0);
+	if (IS_ERR(info)) {
+		test_msg("Could not find free space info\n");
+		ret = PTR_ERR(info);
+		goto out;
+	}
+	flags = btrfs_free_space_flags(path->nodes[0], info);
+	extent_count = btrfs_free_space_extent_count(path->nodes[0], info);
+
+	if (extent_count != num_extents) {
+		test_msg("Extent count is wrong\n");
+		ret = -EINVAL;
+		goto out;
+	}
+	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
+		if (path->slots[0] != 0)
+			goto invalid;
+		end = cache->key.objectid + cache->key.offset;
+		i = 0;
+		while (++path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
+			btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+			if (key.type != BTRFS_FREE_SPACE_BITMAP_KEY)
+				goto invalid;
+			offset = key.objectid;
+			while (offset < key.objectid + key.offset) {
+				bit = free_space_test_bit(cache, path, offset);
+				if (prev_bit == 0 && bit == 1) {
+					extent_start = offset;
+				} else if (prev_bit == 1 && bit == 0) {
+					if (i >= num_extents)
+						goto invalid;
+					if (i >= num_extents ||
+					    extent_start != extents[i].start ||
+					    offset - extent_start != extents[i].length)
+						goto invalid;
+					i++;
+				}
+				prev_bit = bit;
+				offset += cache->sectorsize;
+			}
+		}
+		if (prev_bit == 1) {
+			if (i >= num_extents ||
+			    extent_start != extents[i].start ||
+			    end - extent_start != extents[i].length)
+				goto invalid;
+			i++;
+		}
+		if (i != num_extents)
+			goto invalid;
+	} else {
+		if (btrfs_header_nritems(path->nodes[0]) != num_extents + 1 ||
+		    path->slots[0] != 0)
+			goto invalid;
+		for (i = 0; i < num_extents; i++) {
+			path->slots[0]++;
+			btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+			if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY ||
+			    key.objectid != extents[i].start ||
+			    key.offset != extents[i].length)
+				goto invalid;
+		}
+	}
+
+	ret = 0;
+out:
+	btrfs_release_path(path);
+	return ret;
+invalid:
+	test_msg("Free space tree is invalid\n");
+	ret = -EINVAL;
+	goto out;
+}
+
+static int check_free_space_extents(struct btrfs_trans_handle *trans,
+				    struct btrfs_fs_info *fs_info,
+				    struct btrfs_block_group_cache *cache,
+				    struct btrfs_path *path,
+				    struct free_space_extent *extents,
+				    unsigned int num_extents)
+{
+	struct btrfs_free_space_info *info;
+	u32 flags;
+	int ret;
+
+	info = search_free_space_info(trans, fs_info, cache, path, 0);
+	if (IS_ERR(info)) {
+		test_msg("Could not find free space info\n");
+		btrfs_release_path(path);
+		return PTR_ERR(info);
+	}
+	flags = btrfs_free_space_flags(path->nodes[0], info);
+	btrfs_release_path(path);
+
+	ret = __check_free_space_extents(trans, fs_info, cache, path, extents,
+					 num_extents);
+	if (ret)
+		return ret;
+
+	/* Flip it to the other format and check that for good measure. */
+	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
+		ret = convert_free_space_to_extents(trans, fs_info, cache, path);
+		if (ret) {
+			test_msg("Could not convert to extents\n");
+			return ret;
+		}
+	} else {
+		ret = convert_free_space_to_bitmaps(trans, fs_info, cache, path);
+		if (ret) {
+			test_msg("Could not convert to bitmaps\n");
+			return ret;
+		}
+	}
+	return __check_free_space_extents(trans, fs_info, cache, path, extents,
+					  num_extents);
+}
+
+static int test_empty_block_group(struct btrfs_trans_handle *trans,
+				  struct btrfs_fs_info *fs_info,
+				  struct btrfs_block_group_cache *cache,
+				  struct btrfs_path *path)
+{
+	struct free_space_extent extents[] = {
+		{cache->key.objectid, cache->key.offset},
+	};
+
+	return check_free_space_extents(trans, fs_info, cache, path,
+					extents, ARRAY_SIZE(extents));
+}
+
+static int test_remove_all(struct btrfs_trans_handle *trans,
+			   struct btrfs_fs_info *fs_info,
+			   struct btrfs_block_group_cache *cache,
+			   struct btrfs_path *path)
+{
+	struct free_space_extent extents[] = {};
+	int ret;
+
+	ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
+					    cache->key.objectid,
+					    cache->key.offset);
+	if (ret) {
+		test_msg("Could not remove free space\n");
+		return ret;
+	}
+
+	return check_free_space_extents(trans, fs_info, cache, path,
+					extents, ARRAY_SIZE(extents));
+}
+
+static int test_remove_beginning(struct btrfs_trans_handle *trans,
+				 struct btrfs_fs_info *fs_info,
+				 struct btrfs_block_group_cache *cache,
+				 struct btrfs_path *path)
+{
+	struct free_space_extent extents[] = {
+		{cache->key.objectid + BITMAP_RANGE,
+		 cache->key.offset - BITMAP_RANGE},
+	};
+	int ret;
+
+	ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
+					    cache->key.objectid, BITMAP_RANGE);
+	if (ret) {
+		test_msg("Could not remove free space\n");
+		return ret;
+	}
+
+	return check_free_space_extents(trans, fs_info, cache, path,
+					extents, ARRAY_SIZE(extents));
+
+}
+
+static int test_remove_end(struct btrfs_trans_handle *trans,
+			   struct btrfs_fs_info *fs_info,
+			   struct btrfs_block_group_cache *cache,
+			   struct btrfs_path *path)
+{
+	struct free_space_extent extents[] = {
+		{cache->key.objectid, cache->key.offset - BITMAP_RANGE},
+	};
+	int ret;
+
+	ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
+					    cache->key.objectid +
+					    cache->key.offset - BITMAP_RANGE,
+					    BITMAP_RANGE);
+	if (ret) {
+		test_msg("Could not remove free space\n");
+		return ret;
+	}
+
+	return check_free_space_extents(trans, fs_info, cache, path,
+					extents, ARRAY_SIZE(extents));
+}
+
+static int test_remove_middle(struct btrfs_trans_handle *trans,
+			      struct btrfs_fs_info *fs_info,
+			      struct btrfs_block_group_cache *cache,
+			      struct btrfs_path *path)
+{
+	struct free_space_extent extents[] = {
+		{cache->key.objectid, BITMAP_RANGE},
+		{cache->key.objectid + 2 * BITMAP_RANGE,
+		 cache->key.offset - 2 * BITMAP_RANGE},
+	};
+	int ret;
+
+	ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
+					    cache->key.objectid + BITMAP_RANGE,
+					    BITMAP_RANGE);
+	if (ret) {
+		test_msg("Could not remove free space\n");
+		return ret;
+	}
+
+	return check_free_space_extents(trans, fs_info, cache, path,
+					extents, ARRAY_SIZE(extents));
+}
+
+static int test_merge_left(struct btrfs_trans_handle *trans,
+			   struct btrfs_fs_info *fs_info,
+			   struct btrfs_block_group_cache *cache,
+			   struct btrfs_path *path)
+{
+	struct free_space_extent extents[] = {
+		{cache->key.objectid, 2 * BITMAP_RANGE},
+	};
+	int ret;
+
+	ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
+					    cache->key.objectid,
+					    cache->key.offset);
+	if (ret) {
+		test_msg("Could not remove free space\n");
+		return ret;
+	}
+
+	ret = __add_to_free_space_tree(trans, fs_info, cache, path,
+				       cache->key.objectid, BITMAP_RANGE);
+	if (ret) {
+		test_msg("Could not add free space\n");
+		return ret;
+	}
+
+	ret = __add_to_free_space_tree(trans, fs_info, cache, path,
+				       cache->key.objectid + BITMAP_RANGE,
+				       BITMAP_RANGE);
+	if (ret) {
+		test_msg("Could not add free space\n");
+		return ret;
+	}
+
+	return check_free_space_extents(trans, fs_info, cache, path,
+					extents, ARRAY_SIZE(extents));
+}
+
+static int test_merge_right(struct btrfs_trans_handle *trans,
+			    struct btrfs_fs_info *fs_info,
+			    struct btrfs_block_group_cache *cache,
+			    struct btrfs_path *path)
+{
+	struct free_space_extent extents[] = {
+		{cache->key.objectid + BITMAP_RANGE, 2 * BITMAP_RANGE},
+	};
+	int ret;
+
+	ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
+					    cache->key.objectid,
+					    cache->key.offset);
+	if (ret) {
+		test_msg("Could not remove free space\n");
+		return ret;
+	}
+
+	ret = __add_to_free_space_tree(trans, fs_info, cache, path,
+				       cache->key.objectid + 2 * BITMAP_RANGE,
+				       BITMAP_RANGE);
+	if (ret) {
+		test_msg("Could not add free space\n");
+		return ret;
+	}
+
+	ret = __add_to_free_space_tree(trans, fs_info, cache, path,
+				       cache->key.objectid + BITMAP_RANGE,
+				       BITMAP_RANGE);
+	if (ret) {
+		test_msg("Could not add free space\n");
+		return ret;
+	}
+
+	return check_free_space_extents(trans, fs_info, cache, path,
+					extents, ARRAY_SIZE(extents));
+}
+
+static int test_merge_both(struct btrfs_trans_handle *trans,
+			   struct btrfs_fs_info *fs_info,
+			   struct btrfs_block_group_cache *cache,
+			   struct btrfs_path *path)
+{
+	struct free_space_extent extents[] = {
+		{cache->key.objectid, 3 * BITMAP_RANGE},
+	};
+	int ret;
+
+	ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
+					    cache->key.objectid,
+					    cache->key.offset);
+	if (ret) {
+		test_msg("Could not remove free space\n");
+		return ret;
+	}
+
+	ret = __add_to_free_space_tree(trans, fs_info, cache, path,
+				       cache->key.objectid, BITMAP_RANGE);
+	if (ret) {
+		test_msg("Could not add free space\n");
+		return ret;
+	}
+
+	ret = __add_to_free_space_tree(trans, fs_info, cache, path,
+				       cache->key.objectid + 2 * BITMAP_RANGE,
+				       BITMAP_RANGE);
+	if (ret) {
+		test_msg("Could not add free space\n");
+		return ret;
+	}
+
+	ret = __add_to_free_space_tree(trans, fs_info, cache, path,
+				       cache->key.objectid + BITMAP_RANGE,
+				       BITMAP_RANGE);
+	if (ret) {
+		test_msg("Could not add free space\n");
+		return ret;
+	}
+
+	return check_free_space_extents(trans, fs_info, cache, path,
+					extents, ARRAY_SIZE(extents));
+}
+
+static int test_merge_none(struct btrfs_trans_handle *trans,
+			   struct btrfs_fs_info *fs_info,
+			   struct btrfs_block_group_cache *cache,
+			   struct btrfs_path *path)
+{
+	struct free_space_extent extents[] = {
+		{cache->key.objectid, BITMAP_RANGE},
+		{cache->key.objectid + 2 * BITMAP_RANGE, BITMAP_RANGE},
+		{cache->key.objectid + 4 * BITMAP_RANGE, BITMAP_RANGE},
+	};
+	int ret;
+
+	ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
+					    cache->key.objectid,
+					    cache->key.offset);
+	if (ret) {
+		test_msg("Could not remove free space\n");
+		return ret;
+	}
+
+	ret = __add_to_free_space_tree(trans, fs_info, cache, path,
+				       cache->key.objectid, BITMAP_RANGE);
+	if (ret) {
+		test_msg("Could not add free space\n");
+		return ret;
+	}
+
+	ret = __add_to_free_space_tree(trans, fs_info, cache, path,
+				       cache->key.objectid + 4 * BITMAP_RANGE,
+				       BITMAP_RANGE);
+	if (ret) {
+		test_msg("Could not add free space\n");
+		return ret;
+	}
+
+	ret = __add_to_free_space_tree(trans, fs_info, cache, path,
+				       cache->key.objectid + 2 * BITMAP_RANGE,
+				       BITMAP_RANGE);
+	if (ret) {
+		test_msg("Could not add free space\n");
+		return ret;
+	}
+
+	return check_free_space_extents(trans, fs_info, cache, path,
+					extents, ARRAY_SIZE(extents));
+}
+
+typedef int (*test_func_t)(struct btrfs_trans_handle *,
+			   struct btrfs_fs_info *,
+			   struct btrfs_block_group_cache *,
+			   struct btrfs_path *);
+
+static int run_test(test_func_t test_func, int bitmaps)
+{
+	struct btrfs_root *root = NULL;
+	struct btrfs_block_group_cache *cache = NULL;
+	struct btrfs_trans_handle trans;
+	struct btrfs_path *path = NULL;
+	int ret;
+
+	root = btrfs_alloc_dummy_root();
+	if (IS_ERR(root)) {
+		test_msg("Couldn't allocate dummy root\n");
+		ret = PTR_ERR(root);
+		goto out;
+	}
+
+	root->fs_info = btrfs_alloc_dummy_fs_info();
+	if (!root->fs_info) {
+		test_msg("Couldn't allocate dummy fs info\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	btrfs_set_super_compat_ro_flags(root->fs_info->super_copy,
+					BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE);
+	root->fs_info->free_space_root = root;
+	root->fs_info->tree_root = root;
+
+	root->node = alloc_test_extent_buffer(root->fs_info, 4096);
+	if (!root->node) {
+		test_msg("Couldn't allocate dummy buffer\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	btrfs_set_header_level(root->node, 0);
+	btrfs_set_header_nritems(root->node, 0);
+	root->alloc_bytenr += 8192;
+
+	cache = btrfs_alloc_dummy_block_group(8 * BITMAP_RANGE);
+	if (!cache) {
+		test_msg("Couldn't allocate dummy block group cache\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	cache->bitmap_low_thresh = 0;
+	cache->bitmap_high_thresh = (u32)-1;
+	cache->needs_free_space = 1;
+
+	btrfs_init_dummy_trans(&trans);
+
+	path = btrfs_alloc_path();
+	if (!path) {
+		test_msg("Couldn't allocate path\n");
+		return -ENOMEM;
+	}
+
+	ret = add_block_group_free_space(&trans, root->fs_info, cache);
+	if (ret) {
+		test_msg("Could not add block group free space\n");
+		goto out;
+	}
+
+	if (bitmaps) {
+		ret = convert_free_space_to_bitmaps(&trans, root->fs_info,
+						    cache, path);
+		if (ret) {
+			test_msg("Could not convert block group to bitmaps\n");
+			goto out;
+		}
+	}
+
+	ret = test_func(&trans, root->fs_info, cache, path);
+	if (ret)
+		goto out;
+
+	ret = remove_block_group_free_space(&trans, root->fs_info, cache);
+	if (ret) {
+		test_msg("Could not remove block group free space\n");
+		goto out;
+	}
+
+	if (btrfs_header_nritems(root->node) != 0) {
+		test_msg("Free space tree has leftover items\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = 0;
+out:
+	btrfs_free_path(path);
+	btrfs_free_dummy_block_group(cache);
+	btrfs_free_dummy_root(root);
+	return ret;
+}
+
+static int run_test_both_formats(test_func_t test_func)
+{
+	int ret;
+
+	ret = run_test(test_func, 0);
+	if (ret)
+		return ret;
+	return run_test(test_func, 1);
+}
+
+int btrfs_test_free_space_tree(void)
+{
+	test_func_t tests[] = {
+		test_empty_block_group,
+		test_remove_all,
+		test_remove_beginning,
+		test_remove_end,
+		test_remove_middle,
+		test_merge_left,
+		test_merge_right,
+		test_merge_both,
+		test_merge_none,
+	};
+	int i;
+
+	test_msg("Running free space tree tests\n");
+	for (i = 0; i < ARRAY_SIZE(tests); i++) {
+		int ret = run_test_both_formats(tests[i]);
+		if (ret) {
+			test_msg("%pf failed\n", tests[i]);
+			return ret;
+		}
+	}
+
+	return 0;
+}
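Each case in this new file runs twice, once against plain extent items and once after converting the block group to bitmaps, via run_test_both_formats(). Extending the suite means writing one more function with the test_func_t signature and listing it in tests[]. A hypothetical extra case, shown only to illustrate the harness (the block group allocated by run_test() spans 8 * BITMAP_RANGE and starts out entirely free):

static int test_remove_middle_keep_ends(struct btrfs_trans_handle *trans,
					struct btrfs_fs_info *fs_info,
					struct btrfs_block_group_cache *cache,
					struct btrfs_path *path)
{
	struct free_space_extent extents[] = {
		{cache->key.objectid, BITMAP_RANGE},
		{cache->key.objectid + 7 * BITMAP_RANGE, BITMAP_RANGE},
	};
	int ret;

	/* Carve out the middle six ranges, keeping one range at each end. */
	ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
					    cache->key.objectid + BITMAP_RANGE,
					    6 * BITMAP_RANGE);
	if (ret) {
		test_msg("Could not remove free space\n");
		return ret;
	}

	return check_free_space_extents(trans, fs_info, cache, path,
					extents, ARRAY_SIZE(extents));
}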
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
@@ -100,7 +100,7 @@ static void insert_inode_item_key(struct btrfs_root *root)
 static void setup_file_extents(struct btrfs_root *root)
 {
 	int slot = 0;
-	u64 disk_bytenr = 1 * 1024 * 1024;
+	u64 disk_bytenr = SZ_1M;
 	u64 offset = 0;

 	/* First we want a hole */
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
@@ -23,14 +23,6 @@
 #include "../qgroup.h"
 #include "../backref.h"

-static void init_dummy_trans(struct btrfs_trans_handle *trans)
-{
-	memset(trans, 0, sizeof(*trans));
-	trans->transid = 1;
-	INIT_LIST_HEAD(&trans->qgroup_ref_list);
-	trans->type = __TRANS_DUMMY;
-}
-
 static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr,
 				  u64 num_bytes, u64 parent, u64 root_objectid)
 {
@@ -44,7 +36,7 @@ static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr,
 	u32 size = sizeof(*item) + sizeof(*iref) + sizeof(*block_info);
 	int ret;

-	init_dummy_trans(&trans);
+	btrfs_init_dummy_trans(&trans);

 	ins.objectid = bytenr;
 	ins.type = BTRFS_EXTENT_ITEM_KEY;
@@ -94,7 +86,7 @@ static int add_tree_ref(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
 	u64 refs;
 	int ret;

-	init_dummy_trans(&trans);
+	btrfs_init_dummy_trans(&trans);

 	key.objectid = bytenr;
 	key.type = BTRFS_EXTENT_ITEM_KEY;
@@ -144,7 +136,7 @@ static int remove_extent_item(struct btrfs_root *root, u64 bytenr,
 	struct btrfs_path *path;
 	int ret;

-	init_dummy_trans(&trans);
+	btrfs_init_dummy_trans(&trans);

 	key.objectid = bytenr;
 	key.type = BTRFS_EXTENT_ITEM_KEY;
@@ -178,7 +170,7 @@ static int remove_extent_ref(struct btrfs_root *root, u64 bytenr,
 	u64 refs;
 	int ret;

-	init_dummy_trans(&trans);
+	btrfs_init_dummy_trans(&trans);

 	key.objectid = bytenr;
 	key.type = BTRFS_EXTENT_ITEM_KEY;
@@ -232,7 +224,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
 	struct ulist *new_roots = NULL;
 	int ret;

-	init_dummy_trans(&trans);
+	btrfs_init_dummy_trans(&trans);

 	test_msg("Qgroup basic add\n");
 	ret = btrfs_create_qgroup(NULL, fs_info, 5);
@@ -326,7 +318,7 @@ static int test_multiple_refs(struct btrfs_root *root)
 	struct ulist *new_roots = NULL;
 	int ret;

-	init_dummy_trans(&trans);
+	btrfs_init_dummy_trans(&trans);

 	test_msg("Qgroup multiple refs test\n");

diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
@@ -75,6 +75,23 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
 			list_del_init(&em->list);
 			free_extent_map(em);
 		}
+		/*
+		 * If any block groups are found in ->deleted_bgs then it's
+		 * because the transaction was aborted and a commit did not
+		 * happen (things failed before writing the new superblock
+		 * and calling btrfs_finish_extent_commit()), so we can not
+		 * discard the physical locations of the block groups.
+		 */
+		while (!list_empty(&transaction->deleted_bgs)) {
+			struct btrfs_block_group_cache *cache;
+
+			cache = list_first_entry(&transaction->deleted_bgs,
+						 struct btrfs_block_group_cache,
+						 bg_list);
+			list_del_init(&cache->bg_list);
+			btrfs_put_block_group_trimming(cache);
+			btrfs_put_block_group(cache);
+		}
 		kmem_cache_free(btrfs_transaction_cachep, transaction);
 	}
 }
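The added loop is the usual drain-a-list idiom: as long as the list is non-empty, detach the head and drop the references it holds. Written generically as a sketch (not kernel code, just the shape of the pattern using the <linux/list.h> macros):

#include <linux/list.h>

struct item {
	struct list_head link;
};

static void drain(struct list_head *head)
{
	while (!list_empty(head)) {
		struct item *it = list_first_entry(head, struct item, link);

		list_del_init(&it->link);	/* unlink before dropping references */
		/* put/free 'it' here; it must not be touched afterwards */
	}
}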
@@ -634,17 +651,20 @@ struct btrfs_trans_handle *btrfs_start_transaction_lflush(

 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
 {
-	return start_transaction(root, 0, TRANS_JOIN, 0);
+	return start_transaction(root, 0, TRANS_JOIN,
+				 BTRFS_RESERVE_NO_FLUSH);
 }

 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
 {
-	return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
+	return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
+				 BTRFS_RESERVE_NO_FLUSH);
 }

 struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
 {
-	return start_transaction(root, 0, TRANS_USERSPACE, 0);
+	return start_transaction(root, 0, TRANS_USERSPACE,
+				 BTRFS_RESERVE_NO_FLUSH);
 }

 /*
@@ -662,7 +682,8 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
  */
 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
 {
-	return start_transaction(root, 0, TRANS_ATTACH, 0);
+	return start_transaction(root, 0, TRANS_ATTACH,
+				 BTRFS_RESERVE_NO_FLUSH);
 }

 /*
@@ -677,7 +698,8 @@ btrfs_attach_transaction_barrier(struct btrfs_root *root)
 {
 	struct btrfs_trans_handle *trans;

-	trans = start_transaction(root, 0, TRANS_ATTACH, 0);
+	trans = start_transaction(root, 0, TRANS_ATTACH,
+				  BTRFS_RESERVE_NO_FLUSH);
 	if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
 		btrfs_wait_for_commit(root, 0);

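These hunks are "btrfs: pass proper enum type to start_transaction()" from the shortlog: the last parameter is an enum btrfs_reserve_flush_enum, so the bare 0 is spelled as its named first enumerator. For context, the enum as I recall it from fs/btrfs/ctree.h of this era (approximate excerpt, comments paraphrased; the tree is authoritative):

enum btrfs_reserve_flush_enum {
	/* Caller may not flush anything to satisfy the reservation. */
	BTRFS_RESERVE_NO_FLUSH,
	/* Limited flushing that must not deadlock with the caller. */
	BTRFS_RESERVE_FLUSH_LIMIT,
	/* Flushing may go all the way. */
	BTRFS_RESERVE_FLUSH_ALL,
};

Since BTRFS_RESERVE_NO_FLUSH is the first enumerator it has the value 0, so the change is purely a readability fix.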
@@ -1319,17 +1341,11 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 	u64 root_flags;
 	uuid_le new_uuid;

-	path = btrfs_alloc_path();
-	if (!path) {
-		pending->error = -ENOMEM;
-		return 0;
-	}
+	ASSERT(pending->path);
+	path = pending->path;

-	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
-	if (!new_root_item) {
-		pending->error = -ENOMEM;
-		goto root_item_alloc_fail;
-	}
+	ASSERT(pending->root_item);
+	new_root_item = pending->root_item;

 	pending->error = btrfs_find_free_objectid(tree_root, &objectid);
 	if (pending->error)
@@ -1562,8 +1578,10 @@ clear_skip_qgroup:
 	btrfs_clear_skip_qgroup(trans);
 no_free_objectid:
 	kfree(new_root_item);
-root_item_alloc_fail:
+	pending->root_item = NULL;
 	btrfs_free_path(path);
+	pending->path = NULL;
+
 	return ret;
 }

diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
@@ -137,8 +137,10 @@ struct btrfs_pending_snapshot {
 	struct dentry *dentry;
 	struct inode *dir;
 	struct btrfs_root *root;
+	struct btrfs_root_item *root_item;
 	struct btrfs_root *snap;
 	struct btrfs_qgroup_inherit *inherit;
+	struct btrfs_path *path;
 	/* block reservation for the operation */
 	struct btrfs_block_rsv block_rsv;
 	u64 qgroup_reserved;
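This pairing ("btrfs: preallocate path for snapshot creation at ioctl time" and "btrfs: allocate root item at snapshot ioctl time" in the shortlog) moves the allocations out of the transaction-commit critical section: the ioctl fills pending->path and pending->root_item up front, and create_pending_snapshot() only asserts they exist. A hedged caller-side sketch (illustrative; the real code lives in fs/btrfs/ioctl.c and handles more than this):

/* Illustrative caller-side sketch, not the verbatim ioctl code */
static int prepare_pending_snapshot(struct btrfs_pending_snapshot *pending)
{
	pending->root_item = kzalloc(sizeof(*pending->root_item), GFP_NOFS);
	if (!pending->root_item)
		return -ENOMEM;
	pending->path = btrfs_alloc_path();
	if (!pending->path) {
		kfree(pending->root_item);
		pending->root_item = NULL;
		return -ENOMEM;
	}
	/* Both are freed and NULLed again at the end of create_pending_snapshot(). */
	return 0;
}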
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
@@ -89,6 +89,12 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
 		goto out;
 	}
 	btrfs_release_path(path);
+	/*
+	 * We don't need a lock on a leaf. btrfs_realloc_node() will lock all
+	 * leafs from path->nodes[1], so set lowest_level to 1 to avoid later
+	 * a deadlock (attempting to write lock an already write locked leaf).
+	 */
+	path->lowest_level = 1;
 	wret = btrfs_search_slot(trans, root, &key, path, 0, 1);

 	if (wret < 0) {
@@ -99,9 +105,12 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
 		ret = 0;
 		goto out;
 	}
-	path->slots[1] = btrfs_header_nritems(path->nodes[1]);
-	next_key_ret = btrfs_find_next_key(root, path, &key, 1,
-					   min_trans);
+	/*
+	 * The node at level 1 must always be locked when our path has
+	 * keep_locks set and lowest_level is 1, regardless of the value of
+	 * path->slots[1].
+	 */
+	BUG_ON(path->locks[1] == 0);
 	ret = btrfs_realloc_node(trans, root,
 				 path->nodes[1], 0,
 				 &last_ret,
@@ -110,6 +119,18 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
 		WARN_ON(ret == -EAGAIN);
 		goto out;
 	}
+	/*
+	 * Now that we reallocated the node we can find the next key. Note that
+	 * btrfs_find_next_key() can release our path and do another search
+	 * without COWing, this is because even with path->keep_locks = 1,
+	 * btrfs_search_slot() / ctree.c:unlock_up() does not keeps a lock on a
+	 * node when path->slots[node_level - 1] does not point to the last
+	 * item or a slot beyond the last item (ctree.c:unlock_up()). Therefore
+	 * we search for the next key after reallocating our node.
+	 */
+	path->slots[1] = btrfs_header_nritems(path->nodes[1]);
+	next_key_ret = btrfs_find_next_key(root, path, &key, 1,
+					   min_trans);
 	if (next_key_ret == 0) {
 		memcpy(&root->defrag_progress, &key, sizeof(key));
 		ret = -EAGAIN;
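The defrag fix hinges on btrfs_path::lowest_level: by stopping the search at level 1, btrfs_search_slot() never write locks the leaf, leaving btrfs_realloc_node() free to lock the leaves itself. A sketch of that setup in isolation (assuming a caller that already holds trans/root/key, as btrfs_defrag_leaves() does):

static int search_to_level_one(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_key *key,
			       struct btrfs_path *path)
{
	path->keep_locks = 1;	/* keep locks on the way down */
	path->lowest_level = 1;	/* stop above the leaf; do not lock it */
	return btrfs_search_slot(trans, root, key, path, 0, 1);
}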
@@ -125,6 +125,7 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
 static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
+static void btrfs_close_one_device(struct btrfs_device *device);
 
 DEFINE_MUTEX(uuid_mutex);
 static LIST_HEAD(fs_uuids);
@@ -1102,7 +1103,7 @@ int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
-	path->reada = 2;
+	path->reada = READA_FORWARD;
 
 	key.objectid = device->devid;
 	key.offset = start;
|
|||||||
int ret;
|
int ret;
|
||||||
int slot;
|
int slot;
|
||||||
struct extent_buffer *l;
|
struct extent_buffer *l;
|
||||||
|
u64 min_search_start;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We don't want to overwrite the superblock on the drive nor any area
|
||||||
|
* used by the boot loader (grub for example), so we make sure to start
|
||||||
|
* at an offset of at least 1MB.
|
||||||
|
*/
|
||||||
|
min_search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
|
||||||
|
search_start = max(search_start, min_search_start);
|
||||||
|
|
||||||
path = btrfs_alloc_path();
|
path = btrfs_alloc_path();
|
||||||
if (!path)
|
if (!path)
|
||||||
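This clamp is what makes fitrim stop discarding the bootloader area (the first commit in the pull): every free-extent search on a device now starts at 1MB, or at the mount-time alloc_start if that is larger, which keeps the primary superblock at 64K and the region boot loaders use out of reach. A standalone illustration of the arithmetic, with a hypothetical helper name; assumes <linux/sizes.h> for SZ_1M:

    static u64 clamp_dev_search_start(u64 alloc_start, u64 search_start)
    {
        /* never hand out the first 1MiB of the device */
        u64 min_search_start = alloc_start > SZ_1M ? alloc_start : SZ_1M;

        return search_start > min_search_start ? search_start
                                               : min_search_start;
    }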
@@ -1271,7 +1281,7 @@ again:
 		goto out;
 	}
 
-	path->reada = 2;
+	path->reada = READA_FORWARD;
 	path->search_commit_root = 1;
 	path->skip_locking = 1;
 
@@ -1397,18 +1407,9 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans,
 			 struct btrfs_device *device, u64 num_bytes,
 			 u64 *start, u64 *len)
 {
-	struct btrfs_root *root = device->dev_root;
-	u64 search_start;
-
 	/* FIXME use last free of some kind */
-
-	/*
-	 * we don't want to overwrite the superblock on the drive,
-	 * so we make sure to start at an offset of at least 1MB
-	 */
-	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
 	return find_free_dev_extent_start(trans->transaction, device,
-					  num_bytes, search_start, start, len);
+					  num_bytes, 0, start, len);
 }
 
 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
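With the 1MB floor applied inside find_free_dev_extent_start(), this wrapper loses all of its local policy; after the hunk it is essentially just (condensed from the patched code above):

    int find_free_dev_extent(struct btrfs_trans_handle *trans,
                             struct btrfs_device *device, u64 num_bytes,
                             u64 *start, u64 *len)
    {
        /* FIXME use last free of some kind */
        return find_free_dev_extent_start(trans->transaction, device,
                                          num_bytes, 0, start, len);
    }

Passing 0 is safe because the callee now raises any search_start below the floor.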
@@ -1642,7 +1643,6 @@ static void update_dev_time(char *path_name)
 		return;
 	file_update_time(filp);
 	filp_close(filp, NULL);
-	return;
 }
 
 static int btrfs_rm_dev_item(struct btrfs_root *root,
@@ -3406,7 +3406,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
 	list_for_each_entry(device, devices, dev_list) {
 		old_size = btrfs_device_get_total_bytes(device);
 		size_to_free = div_factor(old_size, 1);
-		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
+		size_to_free = min_t(u64, size_to_free, SZ_1M);
 		if (!device->writeable ||
 		    btrfs_device_get_total_bytes(device) -
 		    btrfs_device_get_bytes_used(device) > size_to_free ||
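Two independent cleanups meet in this one line: min() type-checks its operands, so the old code needed a (u64) cast on one side, while min_t(u64, ...) casts both sides explicitly; and SZ_1M comes from linux/sizes.h ("use linux/sizes.h to represent constants" in the series). A minimal sketch:

    #include <linux/kernel.h>   /* min_t() */
    #include <linux/sizes.h>    /* SZ_1M and friends */

    static u64 cap_to_one_meg(u64 size_to_free)
    {
        /* min() would warn here if the other operand were not already u64 */
        return min_t(u64, size_to_free, SZ_1M);
    }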
@@ -3723,14 +3723,6 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 		goto out;
 	}
 
-	/* allow dup'ed data chunks only in mixed mode */
-	if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
-	    (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
-		btrfs_err(fs_info, "dup for data is not allowed");
-		ret = -EINVAL;
-		goto out;
-	}
-
 	/* allow to reduce meta or sys integrity only if force set */
 	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
 			BTRFS_BLOCK_GROUP_RAID10 |
@@ -3756,6 +3748,13 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 		}
 	} while (read_seqretry(&fs_info->profiles_lock, seq));
 
+	if (btrfs_get_num_tolerated_disk_barrier_failures(bctl->meta.target) <
+		btrfs_get_num_tolerated_disk_barrier_failures(bctl->data.target)) {
+		btrfs_warn(fs_info,
+			"metadata profile 0x%llx has lower redundancy than data profile 0x%llx",
+			bctl->meta.target, bctl->data.target);
+	}
+
 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
 		fs_info->num_tolerated_disk_barrier_failures = min(
 			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info),
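This implements "Check metadata redundancy on balance": instead of hard-rejecting conversions (the dup-for-data -EINVAL removed in the previous hunk), balance now only warns when the requested metadata profile tolerates fewer device failures than the data profile. For orientation, btrfs_get_num_tolerated_disk_barrier_failures() maps profiles roughly as sketched below; the per-profile values are recalled from the kernel of that era and the tolerated_failures() helper is hypothetical, so treat this as indicative only:

    /* single, DUP, RAID0   -> 0 tolerated device failures
     * RAID1, RAID10, RAID5 -> 1
     * RAID6                -> 2
     */
    if (tolerated_failures(meta_target) < tolerated_failures(data_target))
        pr_warn("metadata is less redundant than data\n");  /* sketch */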
@@ -4268,7 +4267,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 	if (!path)
 		return -ENOMEM;
 
-	path->reada = 2;
+	path->reada = READA_FORWARD;
 
 	lock_chunks(root);
 
@@ -4460,7 +4459,7 @@ static int btrfs_cmp_device_info(const void *a, const void *b)
 static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
 {
 	/* TODO allow them to set a preferred stripe size */
-	return 64 * 1024;
+	return SZ_64K;
 }
 
 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
@@ -4528,21 +4527,21 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 	ncopies = btrfs_raid_array[index].ncopies;
 
 	if (type & BTRFS_BLOCK_GROUP_DATA) {
-		max_stripe_size = 1024 * 1024 * 1024;
+		max_stripe_size = SZ_1G;
 		max_chunk_size = 10 * max_stripe_size;
 		if (!devs_max)
 			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
 		/* for larger filesystems, use larger metadata chunks */
-		if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
-			max_stripe_size = 1024 * 1024 * 1024;
+		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
+			max_stripe_size = SZ_1G;
 		else
-			max_stripe_size = 256 * 1024 * 1024;
+			max_stripe_size = SZ_256M;
 		max_chunk_size = max_stripe_size;
 		if (!devs_max)
 			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
-		max_stripe_size = 32 * 1024 * 1024;
+		max_stripe_size = SZ_32M;
 		max_chunk_size = 2 * max_stripe_size;
 		if (!devs_max)
 			devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
@@ -4793,7 +4792,7 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
 	u64 dev_offset;
 	u64 stripe_size;
 	int i = 0;
-	int ret;
+	int ret = 0;
 
 	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
 	read_lock(&em_tree->lock);
@@ -4824,20 +4823,32 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
 		goto out;
 	}
 
+	/*
+	 * Take the device list mutex to prevent races with the final phase of
+	 * a device replace operation that replaces the device object associated
+	 * with the map's stripes, because the device object's id can change
+	 * at any time during that final phase of the device replace operation
+	 * (dev-replace.c:btrfs_dev_replace_finishing()).
+	 */
+	mutex_lock(&chunk_root->fs_info->fs_devices->device_list_mutex);
 	for (i = 0; i < map->num_stripes; i++) {
 		device = map->stripes[i].dev;
 		dev_offset = map->stripes[i].physical;
 
 		ret = btrfs_update_device(trans, device);
 		if (ret)
-			goto out;
+			break;
 		ret = btrfs_alloc_dev_extent(trans, device,
 					     chunk_root->root_key.objectid,
 					     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
 					     chunk_offset, dev_offset,
 					     stripe_size);
 		if (ret)
-			goto out;
+			break;
+	}
+	if (ret) {
+		mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
+		goto out;
 	}
 
 	stripe = &chunk->stripe;
@@ -4850,6 +4861,7 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
 		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
 		stripe++;
 	}
+	mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
 
 	btrfs_set_stack_chunk_length(chunk, chunk_size);
 	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
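The goto-to-break conversion is what keeps the newly taken device_list_mutex balanced: an error inside the loop now falls through to a single unlock-then-goto block instead of jumping over the unlock. The shape of the pattern in isolation, with hypothetical lock and do_step names:

    mutex_lock(&lock);
    for (i = 0; i < n; i++) {
        ret = do_step(i);
        if (ret)
            break;          /* never goto past the unlock */
    }
    if (ret) {
        mutex_unlock(&lock);
        goto out;
    }
    /* ... remaining work that still needs the lock ... */
    mutex_unlock(&lock);

The `int ret = 0` change in the first hunk matters for exactly this structure: if the loop body never ran, the `if (ret)` test after it would otherwise read an uninitialized value.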
@@ -6465,11 +6477,11 @@ int btrfs_read_sys_array(struct btrfs_root *root)
 	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
 	if (!sb)
 		return -ENOMEM;
-	btrfs_set_buffer_uptodate(sb);
+	set_extent_buffer_uptodate(sb);
 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
 	/*
 	 * The sb extent buffer is artificial and just used to read the system array.
-	 * btrfs_set_buffer_uptodate() call does not properly mark all its
+	 * set_extent_buffer_uptodate() call does not properly mark all its
 	 * pages up-to-date when the page is larger: extent does not cover the
 	 * whole page and consequently check_page_uptodate does not find all
 	 * the page's extents up-to-date (the hole beyond sb),
@@ -6512,6 +6524,14 @@ int btrfs_read_sys_array(struct btrfs_root *root)
 			goto out_short_read;
 
 		num_stripes = btrfs_chunk_num_stripes(sb, chunk);
+		if (!num_stripes) {
+			printk(KERN_ERR
+			       "BTRFS: invalid number of stripes %u in sys_array at offset %u\n",
+			       num_stripes, cur_offset);
+			ret = -EIO;
+			break;
+		}
+
 		len = btrfs_chunk_item_size(num_stripes);
 		if (cur_offset + len > array_size)
 			goto out_short_read;
@@ -6520,6 +6540,9 @@ int btrfs_read_sys_array(struct btrfs_root *root)
 			if (ret)
 				break;
 		} else {
+			printk(KERN_ERR
+			       "BTRFS: unexpected item type %u in sys_array at offset %u\n",
+			       (u32)key.type, cur_offset);
 			ret = -EIO;
 			break;
 		}
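The zero-stripe check guards the very next line: btrfs_chunk_item_size() computes the item length from num_stripes - 1, which underflows for zero, so a corrupted or crafted superblock would previously walk off the system array instead of failing cleanly with -EIO. Roughly what that helper does, mirroring the ctree.h definition in simplified form:

    /* (num_stripes - 1) wraps around when num_stripes == 0 */
    static unsigned long chunk_item_size(u32 num_stripes)
    {
        return sizeof(struct btrfs_chunk) +
               sizeof(struct btrfs_stripe) * (num_stripes - 1);
    }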
@@ -6949,7 +6972,7 @@ void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
 	}
 }
 
-void btrfs_close_one_device(struct btrfs_device *device)
+static void btrfs_close_one_device(struct btrfs_device *device)
 {
 	struct btrfs_fs_devices *fs_devices = device->fs_devices;
 	struct btrfs_device *new_device;
@@ -26,7 +26,7 @@
 
 extern struct mutex uuid_mutex;
 
-#define BTRFS_STRIPE_LEN	(64 * 1024)
+#define BTRFS_STRIPE_LEN	SZ_64K
 
 struct buffer_head;
 struct btrfs_pending_bios {
@@ -566,6 +566,5 @@ static inline void unlock_chunks(struct btrfs_root *root)
 struct list_head *btrfs_get_fs_uuids(void);
 void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info);
 void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info);
-void btrfs_close_one_device(struct btrfs_device *device);
 
 #endif
@@ -283,7 +283,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
-	path->reada = 2;
+	path->reada = READA_FORWARD;
 
 	/* search for our xattrs */
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
@@ -446,7 +446,7 @@ static int btrfs_initxattrs(struct inode *inode,
 
 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
 		name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
-			       strlen(xattr->name) + 1, GFP_NOFS);
+			       strlen(xattr->name) + 1, GFP_KERNEL);
 		if (!name) {
 			err = -ENOMEM;
 			break;
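GFP_NOFS clears __GFP_FS so that direct reclaim triggered by the allocation can never re-enter filesystem code; it is a deadlock-avoidance restriction, not a performance knob. Relaxing this site to GFP_KERNEL says the developers judged such re-entry safe here. The general contrast, as an illustrative helper rather than anything from the patch:

    #include <linux/slab.h>

    static char *alloc_xattr_name(size_t len, bool fs_reentry_safe)
    {
        /* GFP_KERNEL: reclaim may call back into the filesystem.
         * GFP_NOFS:   reclaim must stay out of filesystem paths. */
        return kmalloc(len, fs_reentry_safe ? GFP_KERNEL : GFP_NOFS);
    }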
@@ -45,7 +45,8 @@
 		{ BTRFS_TREE_LOG_OBJECTID,	"TREE_LOG"	},	\
 		{ BTRFS_QUOTA_TREE_OBJECTID,	"QUOTA_TREE"	},	\
 		{ BTRFS_TREE_RELOC_OBJECTID,	"TREE_RELOC"	},	\
-		{ BTRFS_UUID_TREE_OBJECTID,	"UUID_RELOC"	},	\
+		{ BTRFS_UUID_TREE_OBJECTID,	"UUID_TREE"	},	\
+		{ BTRFS_FREE_SPACE_TREE_OBJECTID, "FREE_SPACE_TREE" },	\
 		{ BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
 
 #define show_root_type(obj)		\
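Two small tracepoint fixes: the UUID tree was mislabelled "UUID_RELOC" in trace output, and the new free space tree gets its own entry so its root prints by name. The table only affects how tree objectids are rendered, along the lines of this illustrative analogue, assuming the objectid macros from ctree.h:

    struct objectid_name {
        unsigned long long id;
        const char *name;
    };

    static const struct objectid_name root_names[] = {
        { BTRFS_UUID_TREE_OBJECTID,       "UUID_TREE" },
        { BTRFS_FREE_SPACE_TREE_OBJECTID, "FREE_SPACE_TREE" },
    };

A wrong label here only ever affects reporting, never filesystem behaviour.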