btrfs: do away with non-whole_page extent I/O
end_bio_extent_readpage computes whole_page based on bv_offset and bv_len, without taking into account that blk_update_request may modify them when some of the blocks to be read into a page produce a read error. This would cause the read to unlock only part of the file range associated with the page, which would in turn leave the entire page locked, which would not only keep the process blocked instead of returning -EIO to it, but also prevent any further access to the file.

It turns out that btrfs always issues whole-page reads and writes. The special handling of non-whole_page appears to be a mistake or a left-over from a time when this wasn't the case. Indeed, end_bio_extent_writepage distinguished between whole_page and non-whole_page writes but behaved identically in both cases!

I've replaced the whole_page computations with warnings, just to be sure that we're not issuing partial page reads or writes. The warnings should probably just go away some time.

Signed-off-by: Alexandre Oliva <oliva@gnu.org>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
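To make the failure mode above concrete, here is a minimal user-space sketch, not kernel code: the 4096-byte page size, the page offset, and the simplified bio_vec fields are made-up values for illustration. It compares the old and the new start/end computation after blk_update_request() has advanced bv_offset past a failed 1024-byte block at the start of the page.

/* Standalone sketch (not kernel code) of the range computation in the
 * read/write completion handlers.  Assume a 4096-byte page and a
 * bio_vec whose bv_offset/bv_len were advanced by blk_update_request()
 * after the first 1024 bytes of the page failed to read. */
#include <stdio.h>

#define PAGE_CACHE_SIZE 4096ULL

struct bio_vec_sketch {          /* simplified stand-in for struct bio_vec */
        unsigned int bv_offset;
        unsigned int bv_len;
};

int main(void)
{
        unsigned long long page_start = 8192;   /* hypothetical file offset of the page */
        struct bio_vec_sketch bvec = { .bv_offset = 1024, .bv_len = 3072 };

        /* Old computation: covers only the tail of the page, so only part
         * of the extent range gets unlocked. */
        unsigned long long old_start = page_start + bvec.bv_offset;
        unsigned long long old_end   = old_start + bvec.bv_len - 1;

        /* New computation: always spans the whole page. */
        unsigned long long new_start = page_start;
        unsigned long long new_end   = new_start + bvec.bv_offset + bvec.bv_len - 1;

        printf("old range: [%llu, %llu], %llu of %llu bytes unlocked\n",
               old_start, old_end, old_end - old_start + 1, PAGE_CACHE_SIZE);
        printf("new range: [%llu, %llu], %llu of %llu bytes unlocked\n",
               new_start, new_end, new_end - new_start + 1, PAGE_CACHE_SIZE);
        return 0;
}

In the old code the advanced bv_offset also made whole_page come out as 0, so the read completion fell back to check_page_locked(), which never unlocks the page because the head of the range is still locked; the new computation always unlocks the full page range and unlocks the page unconditionally.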
commit 17a5adccf3
parent b216cbfb52
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1947,28 +1947,6 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
 	SetPageUptodate(page);
 }
 
-/*
- * helper function to unlock a page if all the extents in the tree
- * for that page are unlocked
- */
-static void check_page_locked(struct extent_io_tree *tree, struct page *page)
-{
-	u64 start = page_offset(page);
-	u64 end = start + PAGE_CACHE_SIZE - 1;
-	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
-		unlock_page(page);
-}
-
-/*
- * helper function to end page writeback if all the extents
- * in the tree for that page are done with writeback
- */
-static void check_page_writeback(struct extent_io_tree *tree,
-				 struct page *page)
-{
-	end_page_writeback(page);
-}
-
 /*
  * When IO fails, either with EIO or csum verification fails, we
  * try other mirrors that might have a good copy of the data.  This
@@ -2398,19 +2376,24 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
 	struct extent_io_tree *tree;
 	u64 start;
 	u64 end;
-	int whole_page;
 
 	do {
 		struct page *page = bvec->bv_page;
 		tree = &BTRFS_I(page->mapping->host)->io_tree;
 
-		start = page_offset(page) + bvec->bv_offset;
-		end = start + bvec->bv_len - 1;
+		/* We always issue full-page reads, but if some block
+		 * in a page fails to read, blk_update_request() will
+		 * advance bv_offset and adjust bv_len to compensate.
+		 * Print a warning for nonzero offsets, and an error
+		 * if they don't add up to a full page.  */
+		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
+			printk("%s page write in btrfs with offset %u and length %u\n",
+			       bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
+			       ? KERN_ERR "partial" : KERN_INFO "incomplete",
+			       bvec->bv_offset, bvec->bv_len);
 
-		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
-			whole_page = 1;
-		else
-			whole_page = 0;
+		start = page_offset(page);
+		end = start + bvec->bv_offset + bvec->bv_len - 1;
 
 		if (--bvec >= bio->bi_io_vec)
 			prefetchw(&bvec->bv_page->flags);
@@ -2418,10 +2401,7 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
 		if (end_extent_writepage(page, err, start, end))
 			continue;
 
-		if (whole_page)
-			end_page_writeback(page);
-		else
-			check_page_writeback(tree, page);
+		end_page_writeback(page);
 	} while (bvec >= bio->bi_io_vec);
 
 	bio_put(bio);
@@ -2446,7 +2426,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 	struct extent_io_tree *tree;
 	u64 start;
 	u64 end;
-	int whole_page;
 	int mirror;
 	int ret;
 
@@ -2463,13 +2442,19 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 			 (long int)bio->bi_bdev);
 		tree = &BTRFS_I(page->mapping->host)->io_tree;
 
-		start = page_offset(page) + bvec->bv_offset;
-		end = start + bvec->bv_len - 1;
+		/* We always issue full-page reads, but if some block
+		 * in a page fails to read, blk_update_request() will
+		 * advance bv_offset and adjust bv_len to compensate.
+		 * Print a warning for nonzero offsets, and an error
+		 * if they don't add up to a full page.  */
+		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
+			printk("%s page read in btrfs with offset %u and length %u\n",
+			       bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
+			       ? KERN_ERR "partial" : KERN_INFO "incomplete",
+			       bvec->bv_offset, bvec->bv_len);
 
-		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
-			whole_page = 1;
-		else
-			whole_page = 0;
+		start = page_offset(page);
+		end = start + bvec->bv_offset + bvec->bv_len - 1;
 
 		if (++bvec <= bvec_end)
 			prefetchw(&bvec->bv_page->flags);
@@ -2528,23 +2513,13 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 		}
 		unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
 
-		if (whole_page) {
-			if (uptodate) {
-				SetPageUptodate(page);
-			} else {
-				ClearPageUptodate(page);
-				SetPageError(page);
-			}
-			unlock_page(page);
+		if (uptodate) {
+			SetPageUptodate(page);
 		} else {
-			if (uptodate) {
-				check_page_uptodate(tree, page);
-			} else {
-				ClearPageUptodate(page);
-				SetPageError(page);
-			}
-			check_page_locked(tree, page);
+			ClearPageUptodate(page);
+			SetPageError(page);
 		}
+		unlock_page(page);
 	} while (bvec <= bvec_end);
 
 	bio_put(bio);