Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable
* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
  Btrfs: check return value of alloc_extent_map()
  Btrfs - Fix memory leak in btrfs_init_new_device()
  btrfs: prevent heap corruption in btrfs_ioctl_space_info()
  Btrfs: Fix balance panic
  Btrfs: don't release pages when we can't clear the uptodate bits
  Btrfs: fix page->private races
commit 007a14af26
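
Among the merged fixes, "btrfs: prevent heap corruption in btrfs_ioctl_space_info()" changes slot_count from int to u64 and performs the min_t() comparison in the u64 type (see the ioctl.c hunks below). As a rough illustration of why that matters, here is a small self-contained userspace sketch; it is not kernel code, and the simplified min_t() stand-in and the sample values are assumptions chosen only to show how a user-controlled 64-bit count truncated to int can defeat the bound that later sizes an allocation:

/* Illustrative sketch only -- not part of the commit. */
#include <stdio.h>
#include <stdint.h>

/* simplified stand-in for the kernel's min_t(type, a, b) macro */
#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	uint64_t user_slots = 0x100000000ULL; /* hypothetical value from userspace */
	uint64_t available  = 16;             /* entries the kernel would report */

	/* buggy pattern: the u64 is cast to int, typically truncating to 0 here,
	 * so the "minimum" no longer bounds the buffer sized from it */
	int bad = min_t(int, user_slots, available);

	/* fixed pattern: the comparison stays in the full 64-bit type */
	uint64_t good = min_t(uint64_t, user_slots, available);

	printf("min_t(int, ...) = %d, min_t(u64, ...) = %llu\n",
	       bad, (unsigned long long)good);
	return 0;
}

Together with the new slot_count-- and "if (!slot_count) break;" checks in the copy loop, the number of entries written can no longer exceed the number allocated.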
@@ -359,10 +359,14 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
 
 	tree = &BTRFS_I(page->mapping->host)->io_tree;
 
-	if (page->private == EXTENT_PAGE_PRIVATE)
+	if (page->private == EXTENT_PAGE_PRIVATE) {
+		WARN_ON(1);
 		goto out;
-	if (!page->private)
+	}
+	if (!page->private) {
+		WARN_ON(1);
 		goto out;
+	}
 	len = page->private >> 2;
 	WARN_ON(len == 0);
 
@@ -6583,7 +6583,7 @@ static noinline int relocate_data_extent(struct inode *reloc_inode,
 	u64 end = start + extent_key->offset - 1;
 
 	em = alloc_extent_map(GFP_NOFS);
-	BUG_ON(!em || IS_ERR(em));
+	BUG_ON(!em);
 
 	em->start = start;
 	em->len = extent_key->offset;
@@ -1946,6 +1946,7 @@ void set_page_extent_mapped(struct page *page)
 
 static void set_page_extent_head(struct page *page, unsigned long len)
 {
+	WARN_ON(!PagePrivate(page));
 	set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
 }
 
@@ -2821,9 +2822,17 @@ int try_release_extent_state(struct extent_map_tree *map,
 		 * at this point we can safely clear everything except the
 		 * locked bit and the nodatasum bit
 		 */
-		clear_extent_bit(tree, start, end,
+		ret = clear_extent_bit(tree, start, end,
 				 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
 				 0, 0, NULL, mask);
+
+		/* if clear_extent_bit failed for enomem reasons,
+		 * we can't allow the release to continue.
+		 */
+		if (ret < 0)
+			ret = 0;
+		else
+			ret = 1;
 	}
 	return ret;
 }
@@ -3194,7 +3203,13 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 		}
 		if (!PageUptodate(p))
 			uptodate = 0;
-		unlock_page(p);
+
+		/*
+		 * see below about how we avoid a nasty race with release page
+		 * and why we unlock later
+		 */
+		if (i != 0)
+			unlock_page(p);
 	}
 	if (uptodate)
 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
@@ -3218,9 +3233,26 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 	atomic_inc(&eb->refs);
 	spin_unlock(&tree->buffer_lock);
 	radix_tree_preload_end();
+
+	/*
+	 * there is a race where release page may have
+	 * tried to find this extent buffer in the radix
+	 * but failed. It will tell the VM it is safe to
+	 * reclaim the, and it will clear the page private bit.
+	 * We must make sure to set the page private bit properly
+	 * after the extent buffer is in the radix tree so
+	 * it doesn't get lost
+	 */
+	set_page_extent_mapped(eb->first_page);
+	set_page_extent_head(eb->first_page, eb->len);
+	if (!page0)
+		unlock_page(eb->first_page);
 	return eb;
 
 free_eb:
+	if (eb->first_page && !page0)
+		unlock_page(eb->first_page);
+
 	if (!atomic_dec_and_test(&eb->refs))
 		return exists;
 	btrfs_release_extent_buffer(eb);
@@ -3271,10 +3303,11 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 			continue;
 
 		lock_page(page);
+		WARN_ON(!PagePrivate(page));
+
+		set_page_extent_mapped(page);
 		if (i == 0)
 			set_page_extent_head(page, eb->len);
-		else
-			set_page_private(page, EXTENT_PAGE_PRIVATE);
 
 		clear_page_dirty_for_io(page);
 		spin_lock_irq(&page->mapping->tree_lock);
@@ -3464,6 +3497,13 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 
 	for (i = start_i; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
+
+		WARN_ON(!PagePrivate(page));
+
+		set_page_extent_mapped(page);
+		if (i == 0)
+			set_page_extent_head(page, eb->len);
+
 		if (inc_all_pages)
 			page_cache_get(page);
 		if (!PageUptodate(page)) {
@@ -51,8 +51,8 @@ struct extent_map *alloc_extent_map(gfp_t mask)
 {
 	struct extent_map *em;
 	em = kmem_cache_alloc(extent_map_cache, mask);
-	if (!em || IS_ERR(em))
-		return em;
+	if (!em)
+		return NULL;
 	em->in_tree = 0;
 	em->flags = 0;
 	em->compress_type = BTRFS_COMPRESS_NONE;
@@ -186,6 +186,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
 			split = alloc_extent_map(GFP_NOFS);
 		if (!split2)
 			split2 = alloc_extent_map(GFP_NOFS);
+		BUG_ON(!split || !split2);
 
 		write_lock(&em_tree->lock);
 		em = lookup_extent_mapping(em_tree, start, len);
@@ -644,6 +644,7 @@ retry:
 					async_extent->ram_size - 1, 0);
 
 		em = alloc_extent_map(GFP_NOFS);
+		BUG_ON(!em);
 		em->start = async_extent->start;
 		em->len = async_extent->ram_size;
 		em->orig_start = em->start;
@@ -820,6 +821,7 @@ static noinline int cow_file_range(struct inode *inode,
 		BUG_ON(ret);
 
 		em = alloc_extent_map(GFP_NOFS);
+		BUG_ON(!em);
 		em->start = start;
 		em->orig_start = em->start;
 		ram_size = ins.offset;
@@ -1169,6 +1171,7 @@ out_check:
 			struct extent_map_tree *em_tree;
 			em_tree = &BTRFS_I(inode)->extent_tree;
 			em = alloc_extent_map(GFP_NOFS);
+			BUG_ON(!em);
 			em->start = cur_offset;
 			em->orig_start = em->start;
 			em->len = num_bytes;
@@ -2208,7 +2208,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
 	int num_types = 4;
 	int alloc_size;
 	int ret = 0;
-	int slot_count = 0;
+	u64 slot_count = 0;
 	int i, c;
 
 	if (copy_from_user(&space_args,
@@ -2247,7 +2247,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
 		goto out;
 	}
 
-	slot_count = min_t(int, space_args.space_slots, slot_count);
+	slot_count = min_t(u64, space_args.space_slots, slot_count);
 
 	alloc_size = sizeof(*dest) * slot_count;
 
@@ -2267,6 +2267,9 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
 	for (i = 0; i < num_types; i++) {
 		struct btrfs_space_info *tmp;
 
+		if (!slot_count)
+			break;
+
 		info = NULL;
 		rcu_read_lock();
 		list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
@@ -2288,7 +2291,10 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
 				memcpy(dest, &space, sizeof(space));
 				dest++;
 				space_args.total_spaces++;
+				slot_count--;
 			}
+			if (!slot_count)
+				break;
 		}
 		up_read(&info->groups_sem);
 	}
@@ -1157,6 +1157,7 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
 	new_node->bytenr = dest->node->start;
 	new_node->level = node->level;
 	new_node->lowest = node->lowest;
+	new_node->checked = 1;
 	new_node->root = dest;
 
 	if (!node->lowest) {
@@ -1605,12 +1605,14 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 
 	ret = find_next_devid(root, &device->devid);
 	if (ret) {
+		kfree(device->name);
 		kfree(device);
 		goto error;
 	}
 
 	trans = btrfs_start_transaction(root, 0);
 	if (IS_ERR(trans)) {
+		kfree(device->name);
 		kfree(device);
 		ret = PTR_ERR(trans);
 		goto error;