Btrfs: use a cached state for extent state operations during delalloc
This changes the btrfs code that finds delalloc ranges in the extent state tree to use the new state caching added to the set/test bit helpers. It removes one of the biggest causes of rbtree searches in the writeback path. test_range_bit() is also modified to take the cached state as a starting point while searching.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
commit 9655d2982b
parent d5550c6315
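The diff below is easier to follow with the core trick in mind: callers keep a pointer to the extent_state a previous operation landed on, and later operations reuse it as a search hint instead of descending the rbtree again. The standalone C sketch below is only an approximation of that idea — it uses a sorted linked list where btrfs uses an rbtree, and every toy_* name is invented for illustration. Note that the real code also requires state->tree to be non-NULL (the cached node must still be linked into the tree) before trusting the hint.

/*
 * Minimal userspace sketch (not the kernel code) of the caching idea:
 * remember the node a previous operation landed on and reuse it when
 * the next operation starts at the same offset, skipping the search.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_state {
	unsigned long long start;	/* first byte covered by this state */
	unsigned long long end;		/* last byte covered */
	struct toy_state *next;		/* stand-in for an rbtree link */
};

/* stand-in for tree_search(): scan a sorted list for 'start' */
static struct toy_state *toy_search(struct toy_state *head,
				    unsigned long long start)
{
	struct toy_state *s;

	for (s = head; s; s = s->next)
		if (s->end >= start)
			return s;
	return NULL;
}

/*
 * The pattern the patch applies: if the caller kept a cached state and
 * it still begins exactly at 'start', use it; otherwise fall back to a
 * full search. (The kernel additionally checks state->tree here.)
 */
static struct toy_state *toy_lookup(struct toy_state *head,
				    struct toy_state *cached,
				    unsigned long long start)
{
	if (cached && cached->start == start)
		return cached;			/* cache hit: no search */
	return toy_search(head, start);		/* cache miss: search */
}

int main(void)
{
	struct toy_state c = { 8192, 12287, NULL };
	struct toy_state b = { 4096, 8191, &c };
	struct toy_state a = { 0, 4095, &b };
	struct toy_state *cached = NULL;

	cached = toy_lookup(&a, cached, 4096);	/* searches, finds b */
	cached = toy_lookup(&a, cached, 4096);	/* hits the cache */
	printf("found state [%llu, %llu]\n", cached->start, cached->end);
	return 0;
}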
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -720,6 +720,13 @@ again:
 	}
 
 	spin_lock(&tree->lock);
+	if (cached_state && *cached_state) {
+		state = *cached_state;
+		if (state->start == start && state->tree) {
+			node = &state->rb_node;
+			goto hit_next;
+		}
+	}
 	/*
 	 * this search will find all the extents that end after
 	 * our range starts.
@@ -1286,6 +1293,7 @@ static noinline u64 find_lock_delalloc_range(struct inode *inode,
 	u64 delalloc_start;
 	u64 delalloc_end;
 	u64 found;
+	struct extent_state *cached_state = NULL;
 	int ret;
 	int loops = 0;
 
@@ -1323,6 +1331,7 @@ again:
 		/* some of the pages are gone, lets avoid looping by
 		 * shortening the size of the delalloc range we're searching
 		 */
+		free_extent_state(cached_state);
 		if (!loops) {
 			unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
 			max_bytes = PAGE_CACHE_SIZE - offset;
@@ -1336,18 +1345,21 @@ again:
 	BUG_ON(ret);
 
 	/* step three, lock the state bits for the whole range */
-	lock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
+	lock_extent_bits(tree, delalloc_start, delalloc_end,
+			 0, &cached_state, GFP_NOFS);
 
 	/* then test to make sure it is all still delalloc */
 	ret = test_range_bit(tree, delalloc_start, delalloc_end,
-			     EXTENT_DELALLOC, 1);
+			     EXTENT_DELALLOC, 1, cached_state);
 	if (!ret) {
-		unlock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
+		unlock_extent_cached(tree, delalloc_start, delalloc_end,
+				     &cached_state, GFP_NOFS);
 		__unlock_for_delalloc(inode, locked_page,
 				      delalloc_start, delalloc_end);
 		cond_resched();
 		goto again;
 	}
+	free_extent_state(cached_state);
 	*start = delalloc_start;
 	*end = delalloc_end;
 out_failed:
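A detail worth flagging in the hunk above: lock_extent_bits() hands back a referenced extent_state in cached_state, so every path out of find_lock_delalloc_range() drops that reference — unlock_extent_cached() on the not-still-delalloc retry path, free_extent_state() once the range is confirmed, and free_extent_state() again on the shrink-and-retry path in the previous hunk. A minimal userspace sketch of that ownership rule, with invented acquire/release names standing in for the kernel helpers:

/*
 * Userspace sketch of the reference discipline; acquire_cached() and
 * release_cached() are invented stand-ins, not btrfs functions.
 */
#include <stdlib.h>

struct toy_cached {
	int refs;
};

static struct toy_cached *acquire_cached(void)
{
	/* like lock_extent_bits(): hand the caller a referenced state */
	struct toy_cached *s = calloc(1, sizeof(*s));

	if (s)
		s->refs = 1;
	return s;
}

static void release_cached(struct toy_cached *s)
{
	/* like free_extent_state(): drop the caller's reference */
	if (s && --s->refs == 0)
		free(s);
}

int main(void)
{
	struct toy_cached *cached = acquire_cached();

	/* ... use cached as a search hint between lock and unlock ... */
	release_cached(cached);		/* every exit path must do this */
	return 0;
}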
@@ -1530,14 +1542,17 @@ out:
  * range is found set.
  */
 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		   int bits, int filled)
+		   int bits, int filled, struct extent_state *cached)
 {
 	struct extent_state *state = NULL;
 	struct rb_node *node;
 	int bitset = 0;
 
 	spin_lock(&tree->lock);
-	node = tree_search(tree, start);
+	if (cached && cached->tree && cached->start == start)
+		node = &cached->rb_node;
+	else
+		node = tree_search(tree, start);
 	while (node && start <= end) {
 		state = rb_entry(node, struct extent_state, rb_node);
 
@@ -1580,7 +1595,7 @@ static int check_page_uptodate(struct extent_io_tree *tree,
 {
 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
 	u64 end = start + PAGE_CACHE_SIZE - 1;
-	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
+	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
 		SetPageUptodate(page);
 	return 0;
 }
@@ -1594,7 +1609,7 @@ static int check_page_locked(struct extent_io_tree *tree,
 {
 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
 	u64 end = start + PAGE_CACHE_SIZE - 1;
-	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
+	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
 		unlock_page(page);
 	return 0;
 }
@@ -2032,7 +2047,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			continue;
 		}
 		/* the get_extent function already copied into the page */
-		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
+		if (test_range_bit(tree, cur, cur_end,
+				   EXTENT_UPTODATE, 1, NULL)) {
 			check_page_uptodate(tree, page);
 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
 			cur = cur + iosize;
@@ -2305,7 +2321,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 		}
 		/* leave this out until we have a page_mkwrite call */
 		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
-				   EXTENT_DIRTY, 0)) {
+				   EXTENT_DIRTY, 0, NULL)) {
 			cur = cur + iosize;
 			pg_offset += iosize;
 			continue;
@@ -2721,7 +2737,7 @@ int extent_prepare_write(struct extent_io_tree *tree,
 		    !isnew && !PageUptodate(page) &&
 		    (block_off_end > to || block_off_start < from) &&
 		    !test_range_bit(tree, block_start, cur_end,
-				    EXTENT_UPTODATE, 1)) {
+				    EXTENT_UPTODATE, 1, NULL)) {
 			u64 sector;
 			u64 extent_offset = block_start - em->start;
 			size_t iosize;
@@ -2776,7 +2792,7 @@ int try_release_extent_state(struct extent_map_tree *map,
 	int ret = 1;
 
 	if (test_range_bit(tree, start, end,
-			   EXTENT_IOBITS | EXTENT_ORDERED, 0))
+			   EXTENT_IOBITS | EXTENT_ORDERED, 0, NULL))
 		ret = 0;
 	else {
 		if ((mask & GFP_NOFS) == GFP_NOFS)
@@ -2821,7 +2837,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 					    extent_map_end(em) - 1,
 					    EXTENT_LOCKED | EXTENT_WRITEBACK |
 					    EXTENT_ORDERED,
-					    0)) {
+					    0, NULL)) {
 				remove_extent_mapping(map, em);
 				/* once for the rb tree */
 				free_extent_map(em);
@@ -3237,7 +3253,7 @@ int extent_range_uptodate(struct extent_io_tree *tree,
 	int uptodate;
 	unsigned long index;
 
-	ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
+	ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
 	if (ret)
 		return 1;
 	while (start <= end) {
@@ -3267,7 +3283,7 @@ int extent_buffer_uptodate(struct extent_io_tree *tree,
 		return 1;
 
 	ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
-			   EXTENT_UPTODATE, 1);
+			   EXTENT_UPTODATE, 1, NULL);
 	if (ret)
 		return ret;
 
@@ -3303,7 +3319,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 		return 0;
 
 	if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
-			   EXTENT_UPTODATE, 1)) {
+			   EXTENT_UPTODATE, 1, NULL)) {
 		return 0;
 	}
 
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -157,7 +157,7 @@ u64 count_range_bits(struct extent_io_tree *tree,
 		     u64 max_bytes, unsigned long bits);
 
 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		   int bits, int filled);
+		   int bits, int filled, struct extent_state *cached_state);
 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 		      int bits, gfp_t mask);
 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1376,7 +1376,7 @@ again:
 
 	/* already ordered? We're done */
 	if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
-			   EXTENT_ORDERED, 0)) {
+			   EXTENT_ORDERED, 0, NULL)) {
 		goto out;
 	}
 
@@ -1417,7 +1417,7 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
 	int ret;
 
 	ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
-			     EXTENT_ORDERED, 0);
+			     EXTENT_ORDERED, 0, NULL);
 	if (ret)
 		return 0;
 
@@ -1795,7 +1795,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 		return 0;
 
 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
-	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1)) {
+	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
 		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
 				  GFP_NOFS);
 		return 0;
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -262,7 +262,7 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
 
 	ret = test_range_bit(io_tree, entry->file_offset,
 			     entry->file_offset + entry->len - 1,
-			     EXTENT_ORDERED, 0);
+			     EXTENT_ORDERED, 0, NULL);
 	if (ret == 0)
 		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
 out:
@@ -522,7 +522,7 @@ again:
 		end--;
 	}
 	if (test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
-			   EXTENT_ORDERED | EXTENT_DELALLOC, 0)) {
+			   EXTENT_ORDERED | EXTENT_DELALLOC, 0, NULL)) {
 		schedule_timeout(1);
 		goto again;
 	}
@@ -613,7 +613,7 @@ int btrfs_ordered_update_i_size(struct inode *inode,
 	 */
 	if (test_range_bit(io_tree, disk_i_size,
 			   ordered->file_offset + ordered->len - 1,
-			   EXTENT_DELALLOC, 0)) {
+			   EXTENT_DELALLOC, 0, NULL)) {
 		goto out;
 	}
 	/*
@@ -664,7 +664,7 @@ int btrfs_ordered_update_i_size(struct inode *inode,
 	 */
 	if (i_size_test > entry_end(ordered) &&
 	    !test_range_bit(io_tree, entry_end(ordered), i_size_test - 1,
-			    EXTENT_DELALLOC, 0)) {
+			    EXTENT_DELALLOC, 0, NULL)) {
 		new_i_size = min_t(u64, i_size_test, i_size_read(inode));
 	}
 	BTRFS_I(inode)->disk_i_size = new_i_size;
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2180,7 +2180,7 @@ static int tree_block_processed(u64 bytenr, u32 blocksize,
 				struct reloc_control *rc)
 {
 	if (test_range_bit(&rc->processed_blocks, bytenr,
-			   bytenr + blocksize - 1, EXTENT_DIRTY, 1))
+			   bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
 		return 1;
 	return 0;
 }